SequentialTaskQueue: schedule automatically
[portage.git] / pym / _emerge / Scheduler.py
1 # Copyright 1999-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3
4 from __future__ import print_function
5
6 from collections import deque
7 import gc
8 import gzip
9 import logging
10 import signal
11 import sys
12 import textwrap
13 import time
14 import warnings
15 import weakref
16 import zlib
17
18 import portage
19 from portage import os
20 from portage import _encodings
21 from portage import _unicode_decode, _unicode_encode
22 from portage.cache.mappings import slot_dict_class
23 from portage.elog.messages import eerror
24 from portage.localization import _
25 from portage.output import colorize, create_color_func, red
26 bad = create_color_func("BAD")
27 from portage._sets import SETPREFIX
28 from portage._sets.base import InternalPackageSet
29 from portage.util import ensure_dirs, writemsg, writemsg_level
30 from portage.package.ebuild.digestcheck import digestcheck
31 from portage.package.ebuild.digestgen import digestgen
32 from portage.package.ebuild.doebuild import (_check_temp_dir,
33         _prepare_self_update)
34 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
35
36 import _emerge
37 from _emerge.BinpkgFetcher import BinpkgFetcher
38 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
39 from _emerge.BinpkgVerifier import BinpkgVerifier
40 from _emerge.Blocker import Blocker
41 from _emerge.BlockerDB import BlockerDB
42 from _emerge.clear_caches import clear_caches
43 from _emerge.create_depgraph_params import create_depgraph_params
44 from _emerge.create_world_atom import create_world_atom
45 from _emerge.DepPriority import DepPriority
46 from _emerge.depgraph import depgraph, resume_depgraph
47 from _emerge.EbuildBuildDir import EbuildBuildDir
48 from _emerge.EbuildFetcher import EbuildFetcher
49 from _emerge.EbuildPhase import EbuildPhase
50 from _emerge.emergelog import emergelog
51 from _emerge.FakeVartree import FakeVartree
52 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
53 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
54 from _emerge.JobStatusDisplay import JobStatusDisplay
55 from _emerge.MergeListItem import MergeListItem
56 from _emerge.Package import Package
57 from _emerge.PackageMerge import PackageMerge
58 from _emerge.PollScheduler import PollScheduler
59 from _emerge.SlotObject import SlotObject
60 from _emerge.SequentialTaskQueue import SequentialTaskQueue
61
# Python 2/3 compatibility: Python 3 removed the basestring type, so
# alias it to str when running under Python 3 (hexversion >= 3.0).
if sys.hexversion >= 0x3000000:
        basestring = str
64
65 class Scheduler(PollScheduler):
66
        # max time between display status updates (milliseconds)
        _max_display_latency = 3000

        # With any of these options, installed-package blocker checks are
        # pointless (nothing is really merged, or deps are ignored), so
        # _find_blockers_impl() short-circuits and returns None.
        _opts_ignore_blockers = \
                frozenset(["--buildpkgonly",
                "--fetchonly", "--fetch-all-uri",
                "--nodeps", "--pretend"])

        # Any of these options forces foreground mode; see _background_mode().
        _opts_no_background = \
                frozenset(["--pretend",
                "--fetchonly", "--fetch-all-uri"])

        # With any of these options the portage self-update special case is
        # skipped; see _handle_self_update().
        _opts_no_self_update = frozenset(["--buildpkgonly",
                "--fetchonly", "--fetch-all-uri", "--pretend"])
81
        # Scheduler-specific extension of the PollScheduler interface that
        # additionally exposes fetch, setup and unpack scheduling hooks.
        class _iface_class(PollScheduler._sched_iface_class):
                __slots__ = ("fetch",
                        "scheduleSetup", "scheduleUnpack")

        # Interface handed to fetcher tasks: the shared fetch log path plus a
        # callable that schedules a fetcher (bound to _schedule_fetch).
        class _fetch_iface_class(SlotObject):
                __slots__ = ("log_file", "schedule")

        # Container holding one SequentialTaskQueue per named queue.
        _task_queues_class = slot_dict_class(
                ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")

        # Boolean build options mirrored from the corresponding command-line
        # flags in __init__ (buildpkg_exclude is later replaced by a package
        # set there).
        class _build_opts_class(SlotObject):
                __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
                        "fetch_all_uri", "fetchonly", "pretend")

        # Boolean binary-package options mirrored from command-line flags.
        class _binpkg_opts_class(SlotObject):
                __slots__ = ("fetchonly", "getbinpkg", "pretend")

        # Merge progress counter: curval merges done out of maxval total.
        class _pkg_count_class(SlotObject):
                __slots__ = ("curval", "maxval")
101
        class _emerge_log_class(SlotObject):
                # xterm_titles: whether short xterm-title messages are enabled.
                __slots__ = ("xterm_titles",)

                def log(self, *pargs, **kwargs):
                        """Forward a log message to emergelog(), dropping the
                        short_msg when xterm titles are disabled."""
                        if not self.xterm_titles:
                                # Avoid interference with the scheduler's status display.
                                kwargs.pop("short_msg", None)
                        emergelog(self.xterm_titles, *pargs, **kwargs)
110
        class _failed_pkg(SlotObject):
                # Record of a failed merge: the build dir and log path, the
                # package object, and the exit code of the failed phase.
                __slots__ = ("build_dir", "build_log", "pkg", "returncode")

        class _ConfigPool(object):
                """Interface for a task to temporarily allocate a config
                instance from a pool. This allows a task to be constructed
                long before the config instance actually becomes needed, like
                when prefetchers are constructed for the whole merge list."""
                __slots__ = ("_root", "_allocate", "_deallocate")
                def __init__(self, root, allocate, deallocate):
                        # allocate/deallocate are callables supplied by the
                        # Scheduler that manage the per-root config pool.
                        self._root = root
                        self._allocate = allocate
                        self._deallocate = deallocate
                def allocate(self):
                        """Borrow a config instance for this pool's root."""
                        return self._allocate(self._root)
                def deallocate(self, settings):
                        """Return a previously allocated config instance."""
                        self._deallocate(settings)
128
        class _unknown_internal_error(portage.exception.PortageException):
                """
                Used internally to terminate scheduling. The specific reason for
                the failure should have been dumped to stderr.
                """
                def __init__(self, value=""):
                        # value: optional human-readable detail message.
                        portage.exception.PortageException.__init__(self, value)
136
        def __init__(self, settings, trees, mtimedb, myopts,
                spinner, mergelist=None, favorites=None, graph_config=None):
                """
                Build the scheduler state from an emerge invocation.

                @param settings: global portage config
                @param trees: per-root tree dictionaries
                @param mtimedb: resume/mtime database
                @param myopts: parsed emerge command-line options
                @param spinner: progress spinner
                @param mergelist: deprecated; use graph_config instead
                @param favorites: atoms selected on the command line
                @param graph_config: depgraph output (graph, mergelist, caches)
                """
                PollScheduler.__init__(self)

                if mergelist is not None:
                        warnings.warn("The mergelist parameter of the " + \
                                "_emerge.Scheduler constructor is now unused. Use " + \
                                "the graph_config parameter instead.",
                                DeprecationWarning, stacklevel=2)

                self.settings = settings
                self.target_root = settings["EROOT"]
                self.trees = trees
                self.myopts = myopts
                self._spinner = spinner
                self._mtimedb = mtimedb
                self._favorites = favorites
                self._args_set = InternalPackageSet(favorites, allow_repo=True)
                self._build_opts = self._build_opts_class()

                # Mirror each build option from its command-line flag
                # (e.g. fetch_all_uri <- "--fetch-all-uri").
                for k in self._build_opts.__slots__:
                        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
                # buildpkg_exclude is a package set, not a boolean; this
                # overwrites the boolean assigned by the loop above.
                self._build_opts.buildpkg_exclude = InternalPackageSet( \
                        initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
                        allow_wildcard=True, allow_repo=True)

                self._binpkg_opts = self._binpkg_opts_class()
                for k in self._binpkg_opts.__slots__:
                        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

                self.curval = 0
                self._logger = self._emerge_log_class()
                self._task_queues = self._task_queues_class()
                # One sequential queue per allowed key (merge, jobs, ...).
                for k in self._task_queues.allowed_keys:
                        setattr(self._task_queues, k,
                                SequentialTaskQueue())

                # Holds merges that will wait to be executed when no builds are
                # executing. This is useful for system packages since dependencies
                # on system packages are frequently unspecified. For example, see
                # bug #256616.
                self._merge_wait_queue = deque()
                # Holds merges that have been transfered from the merge_wait_queue to
                # the actual merge queue. They are removed from this list upon
                # completion. Other packages can start building only when this list is
                # empty.
                self._merge_wait_scheduled = []

                # Holds system packages and their deep runtime dependencies. Before
                # being merged, these packages go to merge_wait_queue, to be merged
                # when no other packages are building.
                self._deep_system_deps = set()

                # Holds packages to merge which will satisfy currently unsatisfied
                # deep runtime dependencies of system packages. If this is not empty
                # then no parallel builds will be spawned until it is empty. This
                # minimizes the possibility that a build will fail due to the system
                # being in a fragile state. For example, see bug #259954.
                self._unsatisfied_system_deps = set()

                self._status_display = JobStatusDisplay(
                        xterm_titles=('notitles' not in settings.features))
                # Refresh the status display periodically via the event loop.
                self.sched_iface.timeout_add(self._max_display_latency,
                        self._status_display.display)
                self._max_load = myopts.get("--load-average")
                max_jobs = myopts.get("--jobs")
                if max_jobs is None:
                        max_jobs = 1
                self._set_max_jobs(max_jobs)
                self._running_root = trees[trees._running_eroot]["root_config"]
                self.edebug = 0
                if settings.get("PORTAGE_DEBUG", "") == "1":
                        self.edebug = 1
                self.pkgsettings = {}
                # Per-root pools of reusable config instances (see _ConfigPool).
                self._config_pool = {}
                for root in self.trees:
                        self._config_pool[root] = []

                self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
                        'emerge-fetch.log')
                fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
                        schedule=self._schedule_fetch)
                self._sched_iface = self._iface_class(
                        fetch=fetch_iface, output=self._task_output,
                        idle_add=self._event_loop.idle_add,
                        io_add_watch=self._event_loop.io_add_watch,
                        iteration=self._event_loop.iteration,
                        register=self._event_loop.io_add_watch,
                        schedule=self._event_loop._poll_loop,
                        scheduleSetup=self._schedule_setup,
                        scheduleUnpack=self._schedule_unpack,
                        source_remove=self._event_loop.source_remove,
                        timeout_add=self._event_loop.timeout_add,
                        unregister=self._event_loop.source_remove)

                # Weak values so finished prefetchers can be garbage collected.
                self._prefetchers = weakref.WeakValueDictionary()
                self._pkg_queue = []
                self._running_tasks = {}
                self._completed_tasks = set()

                self._failed_pkgs = []
                self._failed_pkgs_all = []
                self._failed_pkgs_die_msgs = []
                self._post_mod_echo_msgs = []
                self._parallel_fetch = False
                self._init_graph(graph_config)
                merge_count = len([x for x in self._mergelist \
                        if isinstance(x, Package) and x.operation == "merge"])
                self._pkg_count = self._pkg_count_class(
                        curval=0, maxval=merge_count)
                self._status_display.maxval = self._pkg_count.maxval

                # The load average takes some time to respond when new
                # jobs are added, so we need to limit the rate of adding
                # new jobs.
                self._job_delay_max = 10
                self._job_delay_factor = 1.0
                self._job_delay_exp = 1.5
                self._previous_job_start_time = None

                # This is used to memoize the _choose_pkg() result when
                # no packages can be chosen until one of the existing
                # jobs completes.
                self._choose_pkg_return_early = False

                # parallel-fetch requires distlocks, multiple merges, and no
                # fetch-only/pretend mode in order to be worthwhile.
                features = self.settings.features
                if "parallel-fetch" in features and \
                        not ("--pretend" in self.myopts or \
                        "--fetch-all-uri" in self.myopts or \
                        "--fetchonly" in self.myopts):
                        if "distlocks" not in features:
                                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
                                portage.writemsg(red("!!!")+" parallel-fetching " + \
                                        "requires the distlocks feature enabled"+"\n",
                                        noiselevel=-1)
                                portage.writemsg(red("!!!")+" you have it disabled, " + \
                                        "thus parallel-fetching is being disabled"+"\n",
                                        noiselevel=-1)
                                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
                        elif merge_count > 1:
                                self._parallel_fetch = True

                if self._parallel_fetch:
                                # clear out existing fetch log if it exists
                                try:
                                        open(self._fetch_log, 'w').close()
                                except EnvironmentError:
                                        pass

                # Remember the currently installed portage instance so
                # _handle_self_update() can detect a pending self-update.
                self._running_portage = None
                portage_match = self._running_root.trees["vartree"].dbapi.match(
                        portage.const.PORTAGE_PACKAGE_ATOM)
                if portage_match:
                        cpv = portage_match.pop()
                        self._running_portage = self._pkg(cpv, "installed",
                                self._running_root, installed=True)
293
294         def _handle_self_update(self):
295
296                 if self._opts_no_self_update.intersection(self.myopts):
297                         return os.EX_OK
298
299                 for x in self._mergelist:
300                         if not isinstance(x, Package):
301                                 continue
302                         if x.operation != "merge":
303                                 continue
304                         if x.root != self._running_root.root:
305                                 continue
306                         if not portage.dep.match_from_list(
307                                 portage.const.PORTAGE_PACKAGE_ATOM, [x]):
308                                 continue
309                         if self._running_portage is None or \
310                                 self._running_portage.cpv != x.cpv or \
311                                 '9999' in x.cpv or \
312                                 'git' in x.inherited or \
313                                 'git-2' in x.inherited:
314                                 rval = _check_temp_dir(self.settings)
315                                 if rval != os.EX_OK:
316                                         return rval
317                                 _prepare_self_update(self.settings)
318                         break
319
320                 return os.EX_OK
321
322         def _terminate_tasks(self):
323                 self._status_display.quiet = True
324                 while self._running_tasks:
325                         task_id, task = self._running_tasks.popitem()
326                         task.cancel()
327                 for q in self._task_queues.values():
328                         q.clear()
329
330         def _init_graph(self, graph_config):
331                 """
332                 Initialization structures used for dependency calculations
333                 involving currently installed packages.
334                 """
335                 self._set_graph_config(graph_config)
336                 self._blocker_db = {}
337                 dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
338                 for root in self.trees:
339                         if graph_config is None:
340                                 fake_vartree = FakeVartree(self.trees[root]["root_config"],
341                                         pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps)
342                                 fake_vartree.sync()
343                         else:
344                                 fake_vartree = graph_config.trees[root]['vartree']
345                         self._blocker_db[root] = BlockerDB(fake_vartree)
346
347         def _destroy_graph(self):
348                 """
349                 Use this to free memory at the beginning of _calc_resume_list().
350                 After _calc_resume_list(), the _init_graph() method
351                 must to be called in order to re-generate the structures that
352                 this method destroys. 
353                 """
354                 self._blocker_db = None
355                 self._set_graph_config(None)
356                 gc.collect()
357
        def _set_max_jobs(self, max_jobs):
                # max_jobs may be an int, or True (compared with "is True"
                # elsewhere in this class, e.g. _background_mode).
                self._max_jobs = max_jobs
                self._task_queues.jobs.max_jobs = max_jobs
                # With parallel-install, merges may also run concurrently.
                if "parallel-install" in self.settings.features:
                        self._task_queues.merge.max_jobs = max_jobs
363
        def _background_mode(self):
                """
                Check if background mode is enabled and adjust states as necessary.

                @rtype: bool
                @returns: True if background mode is enabled, False otherwise.
                """
                # Background mode requires parallelism or quiet output, and is
                # vetoed by any option in _opts_no_background.
                background = (self._max_jobs is True or \
                        self._max_jobs > 1 or "--quiet" in self.myopts \
                        or self.myopts.get("--quiet-build") == "y") and \
                        not bool(self._opts_no_background.intersection(self.myopts))

                if background:
                        interactive_tasks = self._get_interactive_tasks()
                        if interactive_tasks:
                                # Interactive packages need their output on stdio,
                                # so fall back to foreground mode.
                                background = False
                                writemsg_level(">>> Sending package output to stdio due " + \
                                        "to interactive package(s):\n",
                                        level=logging.INFO, noiselevel=-1)
                                msg = [""]
                                for pkg in interactive_tasks:
                                        pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
                                        if pkg.root_config.settings["ROOT"] != "/":
                                                pkg_str += " for " + pkg.root
                                        msg.append(pkg_str)
                                msg.append("")
                                writemsg_level("".join("%s\n" % (l,) for l in msg),
                                        level=logging.INFO, noiselevel=-1)
                                if self._max_jobs is True or self._max_jobs > 1:
                                        # Parallel jobs would interleave with the
                                        # interactive output, so drop to one job.
                                        self._set_max_jobs(1)
                                        writemsg_level(">>> Setting --jobs=1 due " + \
                                                "to the above interactive package(s)\n",
                                                level=logging.INFO, noiselevel=-1)
                                        writemsg_level(">>> In order to temporarily mask " + \
                                                "interactive updates, you may\n" + \
                                                ">>> specify --accept-properties=-interactive\n",
                                                level=logging.INFO, noiselevel=-1)
                # The status display is active only in background mode, and is
                # suppressed even then for --quiet without --verbose.
                self._status_display.quiet = \
                        not background or \
                        ("--quiet" in self.myopts and \
                        "--verbose" not in self.myopts)

                self._logger.xterm_titles = \
                        "notitles" not in self.settings.features and \
                        self._status_display.quiet

                return background
411
412         def _get_interactive_tasks(self):
413                 interactive_tasks = []
414                 for task in self._mergelist:
415                         if not (isinstance(task, Package) and \
416                                 task.operation == "merge"):
417                                 continue
418                         if 'interactive' in task.metadata.properties:
419                                 interactive_tasks.append(task)
420                 return interactive_tasks
421
        def _set_graph_config(self, graph_config):
                """Adopt a graph_config (or reset state when None) and derive
                the structures the scheduler needs from it."""

                if graph_config is None:
                        # Reset to an empty state.
                        self._graph_config = None
                        self._pkg_cache = {}
                        self._digraph = None
                        self._mergelist = []
                        self._deep_system_deps.clear()
                        return

                self._graph_config = graph_config
                self._pkg_cache = graph_config.pkg_cache
                self._digraph = graph_config.graph
                self._mergelist = graph_config.mergelist

                # Without deps or with a single job the graph is not needed.
                if "--nodeps" in self.myopts or \
                        (self._max_jobs is not True and self._max_jobs < 2):
                        # save some memory
                        self._digraph = None
                        graph_config.graph = None
                        graph_config.pkg_cache.clear()
                        self._deep_system_deps.clear()
                        # Keep only the mergelist packages in the cache.
                        for pkg in self._mergelist:
                                self._pkg_cache[pkg] = pkg
                        return

                self._find_system_deps()
                self._prune_digraph()
                self._prevent_builddir_collisions()
                if '--debug' in self.myopts:
                        writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
                        self._digraph.debug_print()
                        writemsg("\n", noiselevel=-1)
455
456         def _find_system_deps(self):
457                 """
458                 Find system packages and their deep runtime dependencies. Before being
459                 merged, these packages go to merge_wait_queue, to be merged when no
460                 other packages are building.
461                 NOTE: This can only find deep system deps if the system set has been
462                 added to the graph and traversed deeply (the depgraph "complete"
463                 parameter will do this, triggered by emerge --complete-graph option).
464                 """
465                 deep_system_deps = self._deep_system_deps
466                 deep_system_deps.clear()
467                 deep_system_deps.update(
468                         _find_deep_system_runtime_deps(self._digraph))
469                 deep_system_deps.difference_update([pkg for pkg in \
470                         deep_system_deps if pkg.operation != "merge"])
471
472         def _prune_digraph(self):
473                 """
474                 Prune any root nodes that are irrelevant.
475                 """
476
477                 graph = self._digraph
478                 completed_tasks = self._completed_tasks
479                 removed_nodes = set()
480                 while True:
481                         for node in graph.root_nodes():
482                                 if not isinstance(node, Package) or \
483                                         (node.installed and node.operation == "nomerge") or \
484                                         node.onlydeps or \
485                                         node in completed_tasks:
486                                         removed_nodes.add(node)
487                         if removed_nodes:
488                                 graph.difference_update(removed_nodes)
489                         if not removed_nodes:
490                                 break
491                         removed_nodes.clear()
492
493         def _prevent_builddir_collisions(self):
494                 """
495                 When building stages, sometimes the same exact cpv needs to be merged
496                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
497                 in the builddir. Currently, normal file locks would be inappropriate
498                 for this purpose since emerge holds all of it's build dir locks from
499                 the main process.
500                 """
501                 cpv_map = {}
502                 for pkg in self._mergelist:
503                         if not isinstance(pkg, Package):
504                                 # a satisfied blocker
505                                 continue
506                         if pkg.installed:
507                                 continue
508                         if pkg.cpv not in cpv_map:
509                                 cpv_map[pkg.cpv] = [pkg]
510                                 continue
511                         for earlier_pkg in cpv_map[pkg.cpv]:
512                                 self._digraph.add(earlier_pkg, pkg,
513                                         priority=DepPriority(buildtime=True))
514                         cpv_map[pkg.cpv].append(pkg)
515
        class _pkg_failure(portage.exception.PortageException):
                """
                An instance of this class is raised by unmerge() when
                an uninstallation fails.
                """
                # Default exit status when no status argument is given.
                status = 1
                def __init__(self, *pargs):
                        portage.exception.PortageException.__init__(self, pargs)
                        # The first positional argument, if any, overrides the
                        # default exit status.
                        if pargs:
                                self.status = pargs[0]
526
527         def _schedule_fetch(self, fetcher):
528                 """
529                 Schedule a fetcher, in order to control the number of concurrent
530                 fetchers. If self._max_jobs is greater than 1 then the fetch
531                 queue is bypassed and the fetcher is started immediately,
532                 otherwise it is added to the front of the parallel-fetch queue.
533                 NOTE: The parallel-fetch queue is currently used to serialize
534                 access to the parallel-fetch log, so changes in the log handling
535                 would be required before it would be possible to enable
536                 concurrent fetching within the parallel-fetch queue.
537                 """
538                 if self._max_jobs > 1:
539                         fetcher.start()
540                 else:
541                         self._task_queues.fetch.addFront(fetcher)
542
543         def _schedule_setup(self, setup_phase):
544                 """
545                 Schedule a setup phase on the merge queue, in order to
546                 serialize unsandboxed access to the live filesystem.
547                 """
548                 if self._task_queues.merge.max_jobs > 1 and \
549                         "ebuild-locks" in self.settings.features:
550                         # Use a separate queue for ebuild-locks when the merge
551                         # queue allows more than 1 job (due to parallel-install),
552                         # since the portage.locks module does not behave as desired
553                         # if we try to lock the same file multiple times
554                         # concurrently from the same process.
555                         self._task_queues.ebuild_locks.add(setup_phase)
556                 else:
557                         self._task_queues.merge.add(setup_phase)
558                 self._schedule()
559
        def _schedule_unpack(self, unpack_phase):
                """
                Schedule an unpack phase on the unpack queue, in order
                to serialize $DISTDIR access for live ebuilds.
                """
                # NOTE(review): unlike _schedule_setup there is no explicit
                # _schedule() call here — presumably the queue schedules
                # automatically; confirm this asymmetry is intentional.
                self._task_queues.unpack.add(unpack_phase)
566
567         def _find_blockers(self, new_pkg):
568                 """
569                 Returns a callable.
570                 """
571                 def get_blockers():
572                         return self._find_blockers_impl(new_pkg)
573                 return get_blockers
574
575         def _find_blockers_impl(self, new_pkg):
576                 if self._opts_ignore_blockers.intersection(self.myopts):
577                         return None
578
579                 blocker_db = self._blocker_db[new_pkg.root]
580
581                 blocker_dblinks = []
582                 for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
583                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
584                                 continue
585                         if new_pkg.cpv == blocking_pkg.cpv:
586                                 continue
587                         blocker_dblinks.append(portage.dblink(
588                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
589                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
590                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
591
592                 return blocker_dblinks
593
        def _generate_digests(self):
                """
                Generate digests if necessary for --digest or FEATURES=digest.
                In order to avoid interference, this must be done before parallel
                tasks are started.

                @return: os.EX_OK on success, 1 if manifest generation failed.
                """

                if '--fetchonly' in self.myopts:
                        return os.EX_OK

                # Digests are needed when --digest was given or any root has
                # FEATURES=digest enabled.
                digest = '--digest' in self.myopts
                if not digest:
                        for pkgsettings in self.pkgsettings.values():
                                if pkgsettings.mycpv is not None:
                                        # ensure that we are using global features
                                        # settings rather than those from package.env
                                        pkgsettings.reset()
                                if 'digest' in pkgsettings.features:
                                        digest = True
                                        break

                if not digest:
                        return os.EX_OK

                for x in self._mergelist:
                        # Only ebuilds scheduled for merge need manifests.
                        if not isinstance(x, Package) or \
                                x.type_name != 'ebuild' or \
                                x.operation != 'merge':
                                continue
                        pkgsettings = self.pkgsettings[x.root]
                        if pkgsettings.mycpv is not None:
                                # ensure that we are using global features
                                # settings rather than those from package.env
                                pkgsettings.reset()
                        if '--digest' not in self.myopts and \
                                'digest' not in pkgsettings.features:
                                continue
                        portdb = x.root_config.trees['porttree'].dbapi
                        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
                        if ebuild_path is None:
                                raise AssertionError("ebuild not found for '%s'" % x.cpv)
                        pkgsettings['O'] = os.path.dirname(ebuild_path)
                        if not digestgen(mysettings=pkgsettings, myportdb=portdb):
                                writemsg_level(
                                        "!!! Unable to generate manifest for '%s'.\n" \
                                        % x.cpv, level=logging.ERROR, noiselevel=-1)
                                return 1

                return os.EX_OK
643
644         def _env_sanity_check(self):
645                 """
646                 Verify a sane environment before trying to build anything from source.
647                 """
648                 have_src_pkg = False
649                 for x in self._mergelist:
650                         if isinstance(x, Package) and not x.built:
651                                 have_src_pkg = True
652                                 break
653
654                 if not have_src_pkg:
655                         return os.EX_OK
656
657                 for settings in self.pkgsettings.values():
658                         for var in ("ARCH", ):
659                                 value = settings.get(var)
660                                 if value and value.strip():
661                                         continue
662                                 msg = _("%(var)s is not set... "
663                                         "Are you missing the '%(configroot)setc/make.profile' symlink? "
664                                         "Is the symlink correct? "
665                                         "Is your portage tree complete?") % \
666                                         {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
667
668                                 out = portage.output.EOutput()
669                                 for line in textwrap.wrap(msg, 70):
670                                         out.eerror(line)
671                                 return 1
672
673                 return os.EX_OK
674
675         def _check_manifests(self):
676                 # Verify all the manifests now so that the user is notified of failure
677                 # as soon as possible.
678                 if "strict" not in self.settings.features or \
679                         "--fetchonly" in self.myopts or \
680                         "--fetch-all-uri" in self.myopts:
681                         return os.EX_OK
682
683                 shown_verifying_msg = False
684                 quiet_settings = {}
685                 for myroot, pkgsettings in self.pkgsettings.items():
686                         quiet_config = portage.config(clone=pkgsettings)
687                         quiet_config["PORTAGE_QUIET"] = "1"
688                         quiet_config.backup_changes("PORTAGE_QUIET")
689                         quiet_settings[myroot] = quiet_config
690                         del quiet_config
691
692                 failures = 0
693
694                 for x in self._mergelist:
695                         if not isinstance(x, Package) or \
696                                 x.type_name != "ebuild":
697                                 continue
698
699                         if x.operation == "uninstall":
700                                 continue
701
702                         if not shown_verifying_msg:
703                                 shown_verifying_msg = True
704                                 self._status_msg("Verifying ebuild manifests")
705
706                         root_config = x.root_config
707                         portdb = root_config.trees["porttree"].dbapi
708                         quiet_config = quiet_settings[root_config.root]
709                         ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
710                         if ebuild_path is None:
711                                 raise AssertionError("ebuild not found for '%s'" % x.cpv)
712                         quiet_config["O"] = os.path.dirname(ebuild_path)
713                         if not digestcheck([], quiet_config, strict=True):
714                                 failures |= 1
715
716                 if failures:
717                         return 1
718                 return os.EX_OK
719
720         def _add_prefetchers(self):
721
722                 if not self._parallel_fetch:
723                         return
724
725                 if self._parallel_fetch:
726                         self._status_msg("Starting parallel fetch")
727
728                         prefetchers = self._prefetchers
729
730                         for pkg in self._mergelist:
731                                 # mergelist can contain solved Blocker instances
732                                 if not isinstance(pkg, Package) or pkg.operation == "uninstall":
733                                         continue
734                                 prefetcher = self._create_prefetcher(pkg)
735                                 if prefetcher is not None:
736                                         self._task_queues.fetch.add(prefetcher)
737                                         prefetchers[pkg] = prefetcher
738
739                         # Start the first prefetcher immediately so that self._task()
740                         # won't discard it. This avoids a case where the first
741                         # prefetcher is discarded, causing the second prefetcher to
742                         # occupy the fetch queue before the first fetcher has an
743                         # opportunity to execute.
744                         self._task_queues.fetch.schedule()
745
746         def _create_prefetcher(self, pkg):
747                 """
748                 @return: a prefetcher, or None if not applicable
749                 """
750                 prefetcher = None
751
752                 if not isinstance(pkg, Package):
753                         pass
754
755                 elif pkg.type_name == "ebuild":
756
757                         prefetcher = EbuildFetcher(background=True,
758                                 config_pool=self._ConfigPool(pkg.root,
759                                 self._allocate_config, self._deallocate_config),
760                                 fetchonly=1, logfile=self._fetch_log,
761                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
762
763                 elif pkg.type_name == "binary" and \
764                         "--getbinpkg" in self.myopts and \
765                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
766
767                         prefetcher = BinpkgPrefetcher(background=True,
768                                 pkg=pkg, scheduler=self._sched_iface)
769
770                 return prefetcher
771
	def _run_pkg_pretend(self):
		"""
		Since pkg_pretend output may be important, this method sends all
		output directly to stdout (regardless of options like --quiet or
		--jobs).

		Runs the pkg_pretend phase for every applicable package in the
		merge list, inside a locked temporary build directory that is
		cleaned and unlocked afterwards.

		@return: os.EX_OK if all checks pass; 1 if any phase fails; or
			the non-zero return value of _check_temp_dir() on a bad
			PORTAGE_TMPDIR
		"""

		failures = 0

		# Use a local PollScheduler instance here, since we don't
		# want tasks here to trigger the usual Scheduler callbacks
		# that handle job scheduling and status display.
		sched_iface = PollScheduler().sched_iface

		for x in self._mergelist:
			# mergelist can contain solved Blocker instances.
			if not isinstance(x, Package):
				continue

			if x.operation == "uninstall":
				continue

			# pkg_pretend was introduced in EAPI 4, so earlier EAPIs
			# cannot define it.
			if x.metadata["EAPI"] in ("0", "1", "2", "3"):
				continue

			# Skip packages that do not define a pkg_pretend phase.
			if "pretend" not in x.metadata.defined_phases:
				continue

			out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
			portage.util.writemsg_stdout(out_str, noiselevel=-1)

			root_config = x.root_config
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(x)

			# setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
			# have to validate it for each package
			rval = _check_temp_dir(settings)
			if rval != os.EX_OK:
				return rval

			build_dir_path = os.path.join(
				os.path.realpath(settings["PORTAGE_TMPDIR"]),
				"portage", x.category, x.pf)
			# Remember whether a stale build dir already exists so it
			# can be cleaned before pkg_pretend runs.
			existing_buildir = os.path.isdir(build_dir_path)
			settings["PORTAGE_BUILDDIR"] = build_dir_path
			build_dir = EbuildBuildDir(scheduler=sched_iface,
				settings=settings)
			build_dir.lock()
			current_task = None

			try:

				# Clean up the existing build dir, in case pkg_pretend
				# checks for available space (bug #390711).
				if existing_buildir:
					if x.built:
						tree = "bintree"
						infloc = os.path.join(build_dir_path, "build-info")
						ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
					else:
						tree = "porttree"
						portdb = root_config.trees["porttree"].dbapi
						ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
						if ebuild_path is None:
							raise AssertionError(
								"ebuild not found for '%s'" % x.cpv)
					portage.package.ebuild.doebuild.doebuild_environment(
						ebuild_path, "clean", settings=settings,
						db=self.trees[settings['EROOT']][tree].dbapi)
					clean_phase = EbuildPhase(background=False,
						phase='clean', scheduler=sched_iface, settings=settings)
					current_task = clean_phase
					clean_phase.start()
					clean_phase.wait()

				if x.built:
					# Binary package: fetch (if remote), verify, and
					# unpack its metadata so pkg_pretend can run.
					tree = "bintree"
					bintree = root_config.trees["bintree"].dbapi.bintree
					fetched = False

					# Display fetch on stdout, so that it's always clear what
					# is consuming time here.
					if bintree.isremote(x.cpv):
						fetcher = BinpkgFetcher(pkg=x,
							scheduler=sched_iface)
						fetcher.start()
						if fetcher.wait() != os.EX_OK:
							failures += 1
							continue
						fetched = fetcher.pkg_path

					verifier = BinpkgVerifier(pkg=x,
						scheduler=sched_iface)
					current_task = verifier
					verifier.start()
					if verifier.wait() != os.EX_OK:
						failures += 1
						continue

					if fetched:
						bintree.inject(x.cpv, filename=fetched)
					tbz2_file = bintree.getname(x.cpv)
					infloc = os.path.join(build_dir_path, "build-info")
					ensure_dirs(infloc)
					portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
					ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
					settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
					settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

				else:
					# Source package: locate the ebuild in the tree.
					tree = "porttree"
					portdb = root_config.trees["porttree"].dbapi
					ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
					if ebuild_path is None:
						raise AssertionError("ebuild not found for '%s'" % x.cpv)
					settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
					if self._build_opts.buildpkgonly:
						settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
					else:
						settings.configdict["pkg"]["MERGE_TYPE"] = "source"

				portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
					"pretend", settings=settings,
					db=self.trees[settings['EROOT']][tree].dbapi)

				prepare_build_dirs(root_config.root, settings, cleanup=0)

				# Expose the versions being replaced, as required for
				# pkg_pretend by the package manager environment.
				vardb = root_config.trees['vartree'].dbapi
				settings["REPLACING_VERSIONS"] = " ".join(
					set(portage.versions.cpv_getversion(match) \
						for match in vardb.match(x.slot_atom) + \
						vardb.match('='+x.cpv)))
				pretend_phase = EbuildPhase(
					phase="pretend", scheduler=sched_iface,
					settings=settings)

				current_task = pretend_phase
				pretend_phase.start()
				ret = pretend_phase.wait()
				if ret != os.EX_OK:
					failures += 1
				portage.elog.elog_process(x.cpv, settings)
			finally:
				# Always cancel any still-running phase, run a clean
				# phase, and release the build directory lock.
				if current_task is not None and current_task.isAlive():
					current_task.cancel()
				clean_phase = EbuildPhase(background=False,
					phase='clean', scheduler=sched_iface, settings=settings)
				clean_phase.start()
				clean_phase.wait()
				build_dir.unlock()

		if failures:
			return 1
		return os.EX_OK
926
	def merge(self):
		"""
		Top-level merge entry point: validate the environment, run
		pre-merge checks (digests, manifests, pkg_pretend), then run
		self._merge() in a loop that supports --keep-going resume, and
		finally report any failed packages.

		@return: os.EX_OK on success, otherwise 1 (or an error code
			from one of the pre-merge checks)
		"""
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		try:
			self._background = self._background_mode()
		except self._unknown_internal_error:
			return 1

		rval = self._handle_self_update()
		if rval != os.EX_OK:
			return rval

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
				"does not exist. Please create this " + \
				"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
				for l in msg:
					out.eerror(l)
				return 1

			if self._background:
				# Propagate background mode into each root's settings
				# (config must be unlocked before modification).
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		rval = self._generate_digests()
		if rval != os.EX_OK:
			return rval

		rval = self._env_sanity_check()
		if rval != os.EX_OK:
			return rval

		# TODO: Immediately recalculate deps here if --keep-going
		#       is enabled and corrupt manifests are detected.
		rval = self._check_manifests()
		if rval != os.EX_OK and not keep_going:
			return rval

		if not fetchonly:
			rval = self._run_pkg_pretend()
			if rval != os.EX_OK:
				return rval

		# Main merge loop: one iteration per attempt; --keep-going
		# recalculates the resume list and retries after failures.
		while True:

			received_signal = []

			def sighandler(signum, frame):
				# Ignore further signals, terminate gracefully, and
				# record the conventional 128+signum exit status.
				signal.signal(signal.SIGINT, signal.SIG_IGN)
				signal.signal(signal.SIGTERM, signal.SIG_IGN)
				portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
					{"signal":signum})
				self.terminate()
				received_signal.append(128 + signum)

			earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
			earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)

			try:
				rval = self._merge()
			finally:
				# Restore previous handlers
				if earlier_sigint_handler is not None:
					signal.signal(signal.SIGINT, earlier_sigint_handler)
				else:
					signal.signal(signal.SIGINT, signal.SIG_DFL)
				if earlier_sigterm_handler is not None:
					signal.signal(signal.SIGTERM, earlier_sigterm_handler)
				else:
					signal.signal(signal.SIGTERM, signal.SIG_DFL)

			if received_signal:
				sys.exit(received_signal[0])

			if rval == os.EX_OK or fetchonly or not keep_going:
				break
			if "resume" not in mtimedb:
				break
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not mergelist:
				break

			if not failed_pkgs:
				break

			# Drop failed packages from the resume mergelist; entries
			# are stored as lists, and list(pkg) presumably yields the
			# matching serialized form — TODO confirm.
			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))

			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

			if not mergelist:
				break

			if not self._calc_resume_list():
				break

			clear_caches(self.trees)
			if not self._mergelist:
				break

			# Reset progress counters for the next attempt.
			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		if failed_pkgs:
			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

		printer = portage.output.EOutput()
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			log_file = None
			log_file_real = None

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				try:
					log_file = open(_unicode_encode(log_path,
						encoding=_encodings['fs'], errors='strict'), mode='rb')
				except IOError:
					pass
				else:
					if log_path.endswith('.gz'):
						# Keep a handle on the raw file so it can be
						# closed separately from the gzip wrapper.
						log_file_real = log_file
						log_file =  gzip.GzipFile(filename='',
							mode='rb', fileobj=log_file)

			if log_file is not None:
				try:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				except zlib.error as e:
					# A corrupt gzip log raises zlib.error mid-read.
					writemsg_level("%s\n" % (e,), level=logging.ERROR,
						noiselevel=-1)
				finally:
					log_file.close()
					if log_file_real is not None:
						log_file_real.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output =  _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				root_msg = ""
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				print()
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				print()
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						continue
					for msgtype, msgcontent in logentries[phase]:
						# NOTE(review): basestring is Python 2 only;
						# assumes a py3 compat alias is defined
						# elsewhere in this module — confirm.
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				msg()

		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and keep_going):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
			else:
				msg = "The following package has " + \
					"failed to build or install:"

			printer.eerror("")
			for line in textwrap.wrap(msg, 72):
				printer.eerror(line)
			printer.eerror("")
			for failed_pkg in self._failed_pkgs_all:
				# Use _unicode_decode() to force unicode format string so
				# that Package.__unicode__() is called in python2.
				msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
				log_path = self._locate_failure_log(failed_pkg)
				if log_path is not None:
					msg += ", Log file:"
				printer.eerror(msg)
				if log_path is not None:
					printer.eerror("  '%s'" % colorize('INFORM', log_path))
			printer.eerror("")

		if self._failed_pkgs_all:
			return 1
		return os.EX_OK
1162
1163         def _elog_listener(self, mysettings, key, logentries, fulltext):
1164                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
1165                 if errors:
1166                         self._failed_pkgs_die_msgs.append(
1167                                 (mysettings, key, errors))
1168
1169         def _locate_failure_log(self, failed_pkg):
1170
1171                 log_paths = [failed_pkg.build_log]
1172
1173                 for log_path in log_paths:
1174                         if not log_path:
1175                                 continue
1176
1177                         try:
1178                                 log_size = os.stat(log_path).st_size
1179                         except OSError:
1180                                 continue
1181
1182                         if log_size == 0:
1183                                 continue
1184
1185                         return log_path
1186
1187                 return None
1188
1189         def _add_packages(self):
1190                 pkg_queue = self._pkg_queue
1191                 for pkg in self._mergelist:
1192                         if isinstance(pkg, Package):
1193                                 pkg_queue.append(pkg)
1194                         elif isinstance(pkg, Blocker):
1195                                 pass
1196
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		In general, this keeps track of installed system packages with
		unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
		a fragile situation, so we don't execute any unrelated builds until
		the circular dependencies are built and installed.

		@param merge: the started merge task; its package is read from
			merge.merge.pkg
		"""
		graph = self._digraph
		if graph is None:
			# No dependency graph is available, so there is nothing
			# to track.
			return
		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root_config.settings["ROOT"] != "/":
			return

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):
				return False
			return True

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
				continue
			if child is pkg:
				continue
			if child.operation == 'merge' and \
				child not in completed_tasks:
				# This runtime dep has not finished merging yet, so
				# record it as unsatisfied.
				unsatisfied.add(child)
1241
	def _merge_wait_exit_handler(self, task):
		# Exit listener for merges that were held in
		# _merge_wait_queue: remove the task from the scheduled
		# list before running the common merge-exit handling.
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
1245
	def _merge_exit(self, merge):
		# Common exit handling for a PackageMerge task: release
		# bookkeeping state, update the status display, and try to
		# schedule more work.
		self._running_tasks.pop(id(merge), None)
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		# Only successful merges of non-installed packages count
		# toward the progress counter.
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		self._schedule()
1255
	def _do_merge_exit(self, merge):
		# Record the outcome of a finished merge: on failure, log it;
		# on success, mark tasks complete and update resume state.
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=pkg,
				returncode=merge.returncode))
			# Suppress failure messages when tasks were terminated
			# deliberately (e.g. by a signal).
			if not self._terminated_tasks:
				self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
				self._status_display.failed = len(self._failed_pkgs)
			return

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			if self._digraph is not None and \
				pkg_to_replace in self._digraph:
				try:
					self._pkg_queue.remove(pkg_to_replace)
				except ValueError:
					pass
				self._task_complete(pkg_to_replace)
			else:
				self._pkg_cache.pop(pkg_to_replace, None)

		if pkg.installed:
			return

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()
1298
	def _build_exit(self, build):
		# Exit listener for build jobs: on success, queue the
		# resulting merge (possibly deferred for system deps); on
		# failure, record the failed package.
		self._running_tasks.pop(id(build), None)
		if build.returncode == os.EX_OK and self._terminated_tasks:
			# We've been interrupted, so we won't
			# add this to the merge queue.
			self.curval += 1
			self._deallocate_config(build.settings)
		elif build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			self._running_tasks[id(merge)] = merge
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=build.pkg,
				returncode=build.returncode))
			# Suppress failure messages when tasks were terminated
			# deliberately (e.g. by a signal).
			if not self._terminated_tasks:
				self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
				self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()
1336
	def _extract_exit(self, build):
		# Binary package extraction shares the same exit handling
		# as a regular build.
		self._build_exit(build)
1339
1340         def _task_complete(self, pkg):
1341                 self._completed_tasks.add(pkg)
1342                 self._unsatisfied_system_deps.discard(pkg)
1343                 self._choose_pkg_return_early = False
1344                 blocker_db = self._blocker_db[pkg.root]
1345                 blocker_db.discardBlocker(pkg)
1346
1347         def _merge(self):
1348
1349                 self._add_prefetchers()
1350                 self._add_packages()
1351                 failed_pkgs = self._failed_pkgs
1352                 portage.locks._quiet = self._background
1353                 portage.elog.add_listener(self._elog_listener)
1354                 rval = os.EX_OK
1355
1356                 try:
1357                         self._main_loop()
1358                 finally:
1359                         self._main_loop_cleanup()
1360                         portage.locks._quiet = False
1361                         portage.elog.remove_listener(self._elog_listener)
1362                         if failed_pkgs:
1363                                 rval = failed_pkgs[-1].returncode
1364
1365                 return rval
1366
	def _main_loop_cleanup(self):
		# Reset all scheduling state after the main loop exits, so
		# that a subsequent run starts from a clean slate.
		del self._pkg_queue[:]
		self._completed_tasks.clear()
		self._deep_system_deps.clear()
		self._unsatisfied_system_deps.clear()
		self._choose_pkg_return_early = False
		self._status_display.reset()
		self._digraph = None
		self._task_queues.fetch.clear()
		self._prefetchers.clear()
1377
1378         def _choose_pkg(self):
1379                 """
1380                 Choose a task that has all its dependencies satisfied. This is used
1381                 for parallel build scheduling, and ensures that we don't build
1382                 anything with deep dependencies that have yet to be merged.
1383                 """
1384
1385                 if self._choose_pkg_return_early:
1386                         return None
1387
1388                 if self._digraph is None:
1389                         if self._is_work_scheduled() and \
1390                                 not ("--nodeps" in self.myopts and \
1391                                 (self._max_jobs is True or self._max_jobs > 1)):
1392                                 self._choose_pkg_return_early = True
1393                                 return None
1394                         return self._pkg_queue.pop(0)
1395
1396                 if not self._is_work_scheduled():
1397                         return self._pkg_queue.pop(0)
1398
1399                 self._prune_digraph()
1400
1401                 chosen_pkg = None
1402
1403                 # Prefer uninstall operations when available.
1404                 graph = self._digraph
1405                 for pkg in self._pkg_queue:
1406                         if pkg.operation == 'uninstall' and \
1407                                 not graph.child_nodes(pkg):
1408                                 chosen_pkg = pkg
1409                                 break
1410
1411                 if chosen_pkg is None:
1412                         later = set(self._pkg_queue)
1413                         for pkg in self._pkg_queue:
1414                                 later.remove(pkg)
1415                                 if not self._dependent_on_scheduled_merges(pkg, later):
1416                                         chosen_pkg = pkg
1417                                         break
1418
1419                 if chosen_pkg is not None:
1420                         self._pkg_queue.remove(chosen_pkg)
1421
1422                 if chosen_pkg is None:
1423                         # There's no point in searching for a package to
1424                         # choose until at least one of the existing jobs
1425                         # completes.
1426                         self._choose_pkg_return_early = True
1427
1428                 return chosen_pkg
1429
1430         def _dependent_on_scheduled_merges(self, pkg, later):
1431                 """
1432                 Traverse the subgraph of the given packages deep dependencies
1433                 to see if it contains any scheduled merges.
1434                 @param pkg: a package to check dependencies for
1435                 @type pkg: Package
1436                 @param later: packages for which dependence should be ignored
1437                         since they will be merged later than pkg anyway and therefore
1438                         delaying the merge of pkg will not result in a more optimal
1439                         merge order
1440                 @type later: set
1441                 @rtype: bool
1442                 @returns: True if the package is dependent, False otherwise.
1443                 """
1444
1445                 graph = self._digraph
1446                 completed_tasks = self._completed_tasks
1447
1448                 dependent = False
1449                 traversed_nodes = set([pkg])
1450                 direct_deps = graph.child_nodes(pkg)
1451                 node_stack = direct_deps
1452                 direct_deps = frozenset(direct_deps)
1453                 while node_stack:
1454                         node = node_stack.pop()
1455                         if node in traversed_nodes:
1456                                 continue
1457                         traversed_nodes.add(node)
1458                         if not ((node.installed and node.operation == "nomerge") or \
1459                                 (node.operation == "uninstall" and \
1460                                 node not in direct_deps) or \
1461                                 node in completed_tasks or \
1462                                 node in later):
1463                                 dependent = True
1464                                 break
1465
1466                         # Don't traverse children of uninstall nodes since
1467                         # those aren't dependencies in the usual sense.
1468                         if node.operation != "uninstall":
1469                                 node_stack.extend(graph.child_nodes(node))
1470
1471                 return dependent
1472
1473         def _allocate_config(self, root):
1474                 """
1475                 Allocate a unique config instance for a task in order
1476                 to prevent interference between parallel tasks.
1477                 """
1478                 if self._config_pool[root]:
1479                         temp_settings = self._config_pool[root].pop()
1480                 else:
1481                         temp_settings = portage.config(clone=self.pkgsettings[root])
1482                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1483                 # performance reasons, call it here to make sure all settings from the
1484                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1485                 temp_settings.reload()
1486                 temp_settings.reset()
1487                 return temp_settings
1488
	def _deallocate_config(self, settings):
		# Return a config instance to the per-root pool so it can
		# be reused by a later task (see _allocate_config).
		self._config_pool[settings['EROOT']].append(settings)
1491
	def _main_loop(self):
		# Drive the scheduler event loop until no more work remains.

		if self._opts_no_background.intersection(self.myopts):
			# These options require serial execution.
			self._set_max_jobs(1)

		# Run the event loop while new work is still being scheduled
		# (_schedule() returns _keep_scheduling()).
		while self._schedule():
			self.sched_iface.run()

		# Drain tasks that are still running after scheduling of new
		# work has stopped (e.g. after a failure).
		while True:
			self._schedule()
			if not self._is_work_scheduled():
				break
			self.sched_iface.run()
1505
1506         def _keep_scheduling(self):
1507                 return bool(not self._terminated_tasks and self._pkg_queue and \
1508                         not (self._failed_pkgs and not self._build_opts.fetchonly))
1509
1510         def _is_work_scheduled(self):
1511                 return bool(self._running_tasks)
1512
	def _schedule_tasks(self):
		# Main scheduling pass: repeatedly attempt state changes
		# (starting wait-queue merges, new jobs, cancelling stale
		# prefetchers) until the state settles.

		while True:

			state_change = 0

			# When the number of jobs and merges drops to zero,
			# process a single merge from _merge_wait_queue if
			# it's not empty. We only process one since these are
			# special packages and we want to ensure that
			# parallel-install does not cause more than one of
			# them to install at the same time.
			if (self._merge_wait_queue and not self._jobs and
				not self._task_queues.merge):
				task = self._merge_wait_queue.popleft()
				task.addExitListener(self._merge_wait_exit_handler)
				self._merge_wait_scheduled.append(task)
				self._task_queues.merge.add(task)
				self._status_display.merges = len(self._task_queues.merge)
				state_change += 1

			if self._schedule_tasks_imp():
				state_change += 1

			self._status_display.display()

			# Cancel prefetchers if they're the only reason
			# the main poll loop is still running.
			if self._failed_pkgs and not self._build_opts.fetchonly and \
				not self._is_work_scheduled() and \
				self._task_queues.fetch:
				self._task_queues.fetch.clear()
				state_change += 1

			# Keep looping while another wait-queue merge could still
			# be started on the next iteration.
			if not (state_change or \
				(self._merge_wait_queue and not self._jobs and
				not self._task_queues.merge)):
				break

		return self._keep_scheduling()
1553
1554         def _job_delay(self):
1555                 """
1556                 @rtype: bool
1557                 @returns: True if job scheduling should be delayed, False otherwise.
1558                 """
1559
1560                 if self._jobs and self._max_load is not None:
1561
1562                         current_time = time.time()
1563
1564                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1565                         if delay > self._job_delay_max:
1566                                 delay = self._job_delay_max
1567                         if (current_time - self._previous_job_start_time) < delay:
1568                                 return True
1569
1570                 return False
1571
1572         def _schedule_tasks_imp(self):
1573                 """
1574                 @rtype: bool
1575                 @returns: True if state changed, False otherwise.
1576                 """
1577
1578                 state_change = 0
1579
1580                 while True:
1581
1582                         if not self._keep_scheduling():
1583                                 return bool(state_change)
1584
1585                         if self._choose_pkg_return_early or \
1586                                 self._merge_wait_scheduled or \
1587                                 (self._jobs and self._unsatisfied_system_deps) or \
1588                                 not self._can_add_job() or \
1589                                 self._job_delay():
1590                                 return bool(state_change)
1591
1592                         pkg = self._choose_pkg()
1593                         if pkg is None:
1594                                 return bool(state_change)
1595
1596                         state_change += 1
1597
1598                         if not pkg.installed:
1599                                 self._pkg_count.curval += 1
1600
1601                         task = self._task(pkg)
1602
1603                         if pkg.installed:
1604                                 merge = PackageMerge(merge=task)
1605                                 self._running_tasks[id(merge)] = merge
1606                                 merge.addExitListener(self._merge_exit)
1607                                 self._task_queues.merge.addFront(merge)
1608
1609                         elif pkg.built:
1610                                 self._jobs += 1
1611                                 self._previous_job_start_time = time.time()
1612                                 self._status_display.running = self._jobs
1613                                 self._running_tasks[id(task)] = task
1614                                 task.addExitListener(self._extract_exit)
1615                                 self._task_queues.jobs.add(task)
1616
1617                         else:
1618                                 self._jobs += 1
1619                                 self._previous_job_start_time = time.time()
1620                                 self._status_display.running = self._jobs
1621                                 self._running_tasks[id(task)] = task
1622                                 task.addExitListener(self._build_exit)
1623                                 self._task_queues.jobs.add(task)
1624
1625                 return bool(state_change)
1626
	def _task(self, pkg):
		"""
		Create a MergeListItem task for the given package, locating
		the installed package it will replace (if any) and reusing a
		finished prefetcher when possible.
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			# Installed packages matching the slot atom with the
			# same category/package name.
			previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
				if portage.cpv_getkey(x) == pkg.cp]
			if not previous_cpv and vardb.cpv_exists(pkg.cpv):
				# same cpv, different SLOT
				previous_cpv = [pkg.cpv]
			if previous_cpv:
				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True,
					operation="uninstall")

		# Only reuse the prefetcher if it's still running; otherwise
		# drop it from the fetch queue (NOTE: this reaches into
		# SequentialTaskQueue internals via _task_queue).
		prefetcher = self._prefetchers.pop(pkg, None)
		if prefetcher is not None and not prefetcher.isAlive():
			try:
				self._task_queues.fetch._task_queue.remove(prefetcher)
			except ValueError:
				pass
			prefetcher = None

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=prefetcher,
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)

		return task
1667
1668         def _failed_pkg_msg(self, failed_pkg, action, preposition):
1669                 pkg = failed_pkg.pkg
1670                 msg = "%s to %s %s" % \
1671                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
1672                 if pkg.root_config.settings["ROOT"] != "/":
1673                         msg += " %s %s" % (preposition, pkg.root)
1674
1675                 log_path = self._locate_failure_log(failed_pkg)
1676                 if log_path is not None:
1677                         msg += ", Log file:"
1678                 self._status_msg(msg)
1679
1680                 if log_path is not None:
1681                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1682
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating carriage-return and
		newline control characters, to guarantee that lines are created
		or erased when necessary and appropriate.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		# In non-background mode, start on a fresh line so the
		# message doesn't collide with other terminal output.
		if not self._background:
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
1697
1698         def _save_resume_list(self):
1699                 """
1700                 Do this before verifying the ebuild Manifests since it might
1701                 be possible for the user to use --resume --skipfirst get past
1702                 a non-essential package with a broken digest.
1703                 """
1704                 mtimedb = self._mtimedb
1705
1706                 mtimedb["resume"] = {}
1707                 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1708                 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1709                 # a list type for options.
1710                 mtimedb["resume"]["myopts"] = self.myopts.copy()
1711
1712                 # Convert Atom instances to plain str.
1713                 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
1714                 mtimedb["resume"]["mergelist"] = [list(x) \
1715                         for x in self._mergelist \
1716                         if isinstance(x, Package) and x.operation == "merge"]
1717
1718                 mtimedb.commit()
1719
	def _calc_resume_list(self):
		"""
		Use the current resume list to calculate a new one,
		dropping any packages with unsatisfied deps.
		@rtype: bool
		@returns: True if successful, False otherwise.
		"""
		print(colorize("GOOD", "*** Resuming merge..."))

		# free some memory before creating
		# the resume depgraph
		self._destroy_graph()

		myparams = create_depgraph_params(self.myopts, None)
		success = False
		e = None
		try:
			success, mydepgraph, dropped_tasks = resume_depgraph(
				self.settings, self.trees, self._mtimedb, self.myopts,
				myparams, self._spinner)
		except depgraph.UnsatisfiedResumeDep as exc:
			# rename variable to avoid python-3.0 error:
			# SyntaxError: can not delete variable 'e' referenced in nested
			#              scope
			e = exc
			mydepgraph = e.depgraph
			dropped_tasks = set()

		if e is not None:
			# Defer the error report so it is displayed after other
			# output (see _post_mod_echo_msgs).
			def unsatisfied_resume_dep_msg():
				mydepgraph.display_problems()
				out = portage.output.EOutput()
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")
				out.eerror("")
				indent = "  "
				show_parents = set()
				for dep in e.value:
					if dep.parent in show_parents:
						continue
					show_parents.add(dep.parent)
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
					else:
						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in textwrap.wrap(msg, 72):
					out.eerror(line)
			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
			return False

		if success and self._show_list():
			mylist = mydepgraph.altlist()
			if mylist:
				if "--tree" in self.myopts:
					mylist.reverse()
				mydepgraph.display(mylist, favorites=self._favorites)

		if not success:
			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
			return False
		mydepgraph.display_problems()
		self._init_graph(mydepgraph.schedulerGraph())

		msg_width = 75
		# Warn about merge tasks that were dropped due to
		# unsatisfied dependencies (--keep-going behavior).
		for task in dropped_tasks:
			if not (isinstance(task, Package) and task.operation == "merge"):
				continue
			pkg = task
			msg = "emerge --keep-going:" + \
				" %s" % (pkg.cpv,)
			if pkg.root_config.settings["ROOT"] != "/":
				msg += " for %s" % (pkg.root,)
			msg += " dropped due to unsatisfied dependency."
			for line in textwrap.wrap(msg, msg_width):
				eerror(line, phase="other", key=pkg.cpv)
			settings = self.pkgsettings[pkg.root]
			# Ensure that log collection from $T is disabled inside
			# elog_process(), since any logs that might exist are
			# not valid here.
			settings.pop("T", None)
			portage.elog.elog_process(pkg.cpv, settings)
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

		return True
1816
1817         def _show_list(self):
1818                 myopts = self.myopts
1819                 if "--quiet" not in myopts and \
1820                         ("--ask" in myopts or "--tree" in myopts or \
1821                         "--verbose" in myopts):
1822                         return True
1823                 return False
1824
	def _world_atom(self, pkg):
		"""
		Add or remove the package to the world file, but only if
		it's supposed to be added or removed. Otherwise, do nothing.

		Bails out early when the current emerge options make world
		updates inappropriate, when the package is not on the target
		root, or when it was not explicitly requested on the command
		line (args_set).
		"""

		# None of these operations actually install anything into the
		# target root, so the world file must not be touched.
		if set(("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			return

		# Only packages merged to the target root belong in its world file.
		if pkg.root != self.target_root:
			return

		# Only record packages that match an argument atom; dependencies
		# pulled in indirectly are not world-file candidates.
		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			return

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["selected"]
		# The world set implementation is duck-typed: lock/load/add/remove
		# are probed with hasattr, since some set backends are read-only
		# or lockless.
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		try:
			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			if pkg.operation == "uninstall":
				if hasattr(world_set, "cleanPackage"):
					world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
							pkg.cpv)
				# Also drop any now-empty nested sets that referenced
				# this package.
				if hasattr(world_set, "remove"):
					for s in pkg.root_config.setconfig.active:
						world_set.remove(SETPREFIX+s)
			else:
				# create_world_atom returns None when the package should
				# not be recorded (e.g. already covered by a set).
				atom = create_world_atom(pkg, args_set, root_config)
				if atom:
					if hasattr(world_set, "add"):
						self._status_msg(('Recording %s in "world" ' + \
							'favorites file...') % atom)
						logger.log(" === (%s of %s) Updating world file (%s)" % \
							(pkg_count.curval, pkg_count.maxval, pkg.cpv))
						world_set.add(atom)
					else:
						writemsg_level('\n!!! Unable to record %s in "world"\n' % \
							(atom,), level=logging.WARN, noiselevel=-1)
		finally:
			# Always release the lock, even if load/add/remove raised.
			if world_locked:
				world_set.unlock()
1879
1880         def _pkg(self, cpv, type_name, root_config, installed=False,
1881                 operation=None, myrepo=None):
1882                 """
1883                 Get a package instance from the cache, or create a new
1884                 one if necessary. Raises KeyError from aux_get if it
1885                 failures for some reason (package does not exist or is
1886                 corrupt).
1887                 """
1888
1889                 # Reuse existing instance when available.
1890                 pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
1891                         type_name=type_name, repo_name=myrepo, root_config=root_config,
1892                         installed=installed, operation=operation))
1893
1894                 if pkg is not None:
1895                         return pkg
1896
1897                 tree_type = depgraph.pkg_tree_map[type_name]
1898                 db = root_config.trees[tree_type].dbapi
1899                 db_keys = list(self.trees[root_config.root][
1900                         tree_type].dbapi._aux_cache_keys)
1901                 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
1902                 pkg = Package(built=(type_name != "ebuild"),
1903                         cpv=cpv, installed=installed, metadata=metadata,
1904                         root_config=root_config, type_name=type_name)
1905                 self._pkg_cache[pkg] = pkg
1906                 return pkg