e94b0461bb135238bb2e38ba4fc6afdf4e51c162
[portage.git] / pym / _emerge / Scheduler.py
1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3
4 from __future__ import print_function
5
6 import gc
7 import gzip
8 import logging
9 import shutil
10 import sys
11 import tempfile
12 import textwrap
13 import time
14 import weakref
15 import zlib
16
17 import portage
18 from portage import StringIO
19 from portage import os
20 from portage import _encodings
21 from portage import _unicode_encode
22 from portage.cache.mappings import slot_dict_class
23 from portage.const import LIBC_PACKAGE_ATOM
24 from portage.elog.messages import eerror
25 from portage.localization import _
26 from portage.output import colorize, create_color_func, red
27 bad = create_color_func("BAD")
28 from portage._sets import SETPREFIX
29 from portage._sets.base import InternalPackageSet
30 from portage.util import writemsg, writemsg_level
31 from portage.package.ebuild.digestcheck import digestcheck
32 from portage.package.ebuild.digestgen import digestgen
33 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
34
35 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
36 from _emerge.Blocker import Blocker
37 from _emerge.BlockerDB import BlockerDB
38 from _emerge.clear_caches import clear_caches
39 from _emerge.create_depgraph_params import create_depgraph_params
40 from _emerge.create_world_atom import create_world_atom
41 from _emerge.DepPriority import DepPriority
42 from _emerge.depgraph import depgraph, resume_depgraph
43 from _emerge.EbuildFetcher import EbuildFetcher
44 from _emerge.EbuildPhase import EbuildPhase
45 from _emerge.emergelog import emergelog, _emerge_log_dir
46 from _emerge.FakeVartree import FakeVartree
47 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
48 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
49 from _emerge.JobStatusDisplay import JobStatusDisplay
50 from _emerge.MergeListItem import MergeListItem
51 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
52 from _emerge.Package import Package
53 from _emerge.PackageMerge import PackageMerge
54 from _emerge.PollScheduler import PollScheduler
55 from _emerge.RootConfig import RootConfig
56 from _emerge.SlotObject import SlotObject
57 from _emerge.SequentialTaskQueue import SequentialTaskQueue
58
# Python 3 removed the basestring type; alias it to str so that
# isinstance(..., basestring) checks work on both Python 2 and 3.
if sys.hexversion >= 0x3000000:
	basestring = str
61
class Scheduler(PollScheduler):

	# Options under which installed-blocker checks are pointless,
	# since nothing is actually merged to the live filesystem.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options whose output belongs on the user's terminal, so
	# background mode must remain disabled.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options that suppress restart behavior after failures
	# (presumably the re-exec after a portage self-upgrade — confirm
	# against the code that consumes this set).
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that are invalid in combination with a resumed merge list.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file used to serialize output of parallel fetchers.
	_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
80
	class _iface_class(SlotObject):
		# Bundle of bound scheduler callbacks handed out to tasks so
		# they can interact with the scheduler without holding a
		# direct reference to it.
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "dblinkEmergeLog", "fetch",
			"output", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",
			"unregister")
87
	class _fetch_iface_class(SlotObject):
		# Minimal interface given to fetch tasks: the shared log file
		# and a callback for scheduling themselves.
		__slots__ = ("log_file", "schedule")
90
	# Slot-dict type holding one task queue per category of work
	# (package merges, build jobs, fetches, unpacks).
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")
93
	class _build_opts_class(SlotObject):
		# Boolean build options; each slot name maps to the command
		# line flag of the same name with "_" replaced by "-"
		# (e.g. fetch_all_uri <-> --fetch-all-uri); see __init__.
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")
97
	class _binpkg_opts_class(SlotObject):
		# Boolean binary-package options derived from command line
		# flags, populated the same way as _build_opts_class.
		__slots__ = ("fetchonly", "getbinpkg", "pretend")
100
	class _pkg_count_class(SlotObject):
		# Merge progress counter: curval packages done out of maxval.
		__slots__ = ("curval", "maxval")
103
	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			"""
			Forward a message to emergelog(), dropping the short_msg
			(xterm title) variant when titles are disabled.
			"""
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)
112
	class _failed_pkg(SlotObject):
		# Record of a failed merge (package, its build dir/log and the
		# failing returncode), kept for the final failure summary.
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
115
	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			# allocate/deallocate are callables supplied by the
			# scheduler that borrow and return config instances.
			self._root = root
			self._allocate = allocate
			self._deallocate = deallocate
		def allocate(self):
			# Borrow a config instance for this pool's root.
			return self._allocate(self._root)
		def deallocate(self, settings):
			# Return a previously borrowed config instance.
			self._deallocate(settings)
130
	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
138
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		"""
		Set up all scheduler state: option objects derived from the
		command line, task queues, status display, blocker databases,
		the scheduler interface objects handed to tasks, and the
		dependency digraph used for parallel scheduling.

		@param settings: global portage config instance
		@param trees: portage trees/dbapi instances, keyed by root
		@param mtimedb: mtime database (presumably holds --resume
			state; confirm against callers)
		@param myopts: parsed emerge command line options
		@param spinner: progress spinner for foreground output
		@param mergelist: ordered list of merge tasks (Package
			instances and satisfied Blockers)
		@param favorites: atoms used to build self._args_set
		@param digraph: dependency graph for mergelist, or None
		"""
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.trees = trees
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Populate the option objects: each slot name corresponds to
		# the command line flag "--<name with _ replaced by ->".
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self.curval = 0
		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		# One SequentialTaskQueue per category (merge/jobs/fetch/unpack).
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
			max_jobs = 1
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.pkgsettings = {}
		# Pool of reusable config instances, one list per root.
		self._config_pool = {}
		for root in self.trees:
			self._config_pool[root] = []

		self._init_installed_graph()

		# Interface objects handed to tasks so they can call back into
		# the scheduler without holding a direct reference to it.
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, output=self._task_output,
			register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# WeakValueDictionary so finished prefetchers are reclaimed
		# automatically.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._running_tasks = set()
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		# parallel-fetch requires distlocks and at least 2 merges, and
		# is pointless with --pretend/--fetchonly/--fetch-all-uri.
		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif merge_count > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
				# clear out existing fetch log if it exists
				# NOTE(review): the file object returned by open() is
				# never explicitly closed; CPython closes it on GC,
				# but an explicit close() would be cleaner.
				try:
					open(self._fetch_log, 'w')
				except EnvironmentError:
					pass

		# Resolve the installed Package instance for the running
		# portage, if any (used e.g. for self-upgrade detection).
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		if portage_match:
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
285
286         def _init_installed_graph(self):
287                 """
288                 Initialization structures used for dependency calculations
289                 involving currently installed packages.
290                 """
291                 # TODO: Replace the BlockerDB with a depgraph of installed packages
292                 # that's updated incrementally with each upgrade/uninstall operation
293                 # This will be useful for making quick and safe decisions with respect
294                 # to aggressive parallelization discussed in bug #279623.
295                 self._blocker_db = {}
296                 for root in self.trees:
297                         self._blocker_db[root] = \
298                                 BlockerDB(FakeVartree(self.trees[root]["root_config"]))
299
	def _destroy_installed_graph(self):
		"""
		Use this to free memory before calling _calc_resume_list().
		After _calc_resume_list(), the _init_installed_graph() and
		_set_digraph() methods need to be called in order to
		re-generate the structures that this method destroys.
		"""
		self._blocker_db = None
		self._set_digraph(None)
		# Reclaim the dropped structures immediately.
		gc.collect()
310
	def _poll(self, timeout=None):
		# Start any runnable tasks before blocking in the poll loop.
		self._schedule()
		PollScheduler._poll(self, timeout=timeout)
314
	def _set_max_jobs(self, max_jobs):
		# Keep the jobs queue's concurrency limit in sync with our own.
		self._max_jobs = max_jobs
		self._task_queues.jobs.max_jobs = max_jobs
318
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		# Background mode requires parallelism (--jobs) or quiet output,
		# and is vetoed outright by options in _opts_no_background.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts \
			or "--quiet-build" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		if background:
			interactive_tasks = self._get_interactive_tasks()
			if interactive_tasks:
				# Interactive packages need the terminal, so fall back
				# to foreground output and a single job.
				background = False
				writemsg_level(">>> Sending package output to stdio due " + \
					"to interactive package(s):\n",
					level=logging.INFO, noiselevel=-1)
				msg = [""]
				for pkg in interactive_tasks:
					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
					if pkg.root != "/":
						pkg_str += " for " + pkg.root
					msg.append(pkg_str)
				msg.append("")
				writemsg_level("".join("%s\n" % (l,) for l in msg),
					level=logging.INFO, noiselevel=-1)
				if self._max_jobs is True or self._max_jobs > 1:
					self._set_max_jobs(1)
					writemsg_level(">>> Setting --jobs=1 due " + \
						"to the above interactive package(s)\n",
						level=logging.INFO, noiselevel=-1)
					writemsg_level(">>> In order to temporarily mask " + \
						"interactive updates, you may\n" + \
						">>> specify --accept-properties=-interactive\n",
						level=logging.INFO, noiselevel=-1)
		# The status display is only useful in background mode and is
		# suppressed by --quiet (unless --verbose overrides it).
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet

		return background
366
367         def _get_interactive_tasks(self):
368                 interactive_tasks = []
369                 for task in self._mergelist:
370                         if not (isinstance(task, Package) and \
371                                 task.operation == "merge"):
372                                 continue
373                         if 'interactive' in task.metadata.properties:
374                                 interactive_tasks.append(task)
375                 return interactive_tasks
376
	def _set_digraph(self, digraph):
		"""
		Store the dependency graph and derive the scheduling structures
		from it (system deps, pruned roots, builddir-collision edges,
		implicit libc deps). The graph is discarded when it cannot help
		scheduling (--nodeps, no graph, or fewer than 2 jobs).
		"""
		if "--nodeps" in self.myopts or \
			digraph is None or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# save some memory
			self._digraph = None
			return

		self._digraph = digraph
		# Order matters: prune after system deps are recorded, then
		# add the extra scheduling edges.
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
		self._implicit_libc_deps()
		if '--debug' in self.myopts:
			writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
			self._digraph.debug_print()
			writemsg("\n", noiselevel=-1)
394
395         def _find_system_deps(self):
396                 """
397                 Find system packages and their deep runtime dependencies. Before being
398                 merged, these packages go to merge_wait_queue, to be merged when no
399                 other packages are building.
400                 NOTE: This can only find deep system deps if the system set has been
401                 added to the graph and traversed deeply (the depgraph "complete"
402                 parameter will do this, triggered by emerge --complete-graph option).
403                 """
404                 deep_system_deps = self._deep_system_deps
405                 deep_system_deps.clear()
406                 deep_system_deps.update(
407                         _find_deep_system_runtime_deps(self._digraph))
408                 deep_system_deps.difference_update([pkg for pkg in \
409                         deep_system_deps if pkg.operation != "merge"])
410
411         def _prune_digraph(self):
412                 """
413                 Prune any root nodes that are irrelevant.
414                 """
415
416                 graph = self._digraph
417                 completed_tasks = self._completed_tasks
418                 removed_nodes = set()
419                 while True:
420                         for node in graph.root_nodes():
421                                 if not isinstance(node, Package) or \
422                                         (node.installed and node.operation == "nomerge") or \
423                                         node.onlydeps or \
424                                         node in completed_tasks:
425                                         removed_nodes.add(node)
426                         if removed_nodes:
427                                 graph.difference_update(removed_nodes)
428                         if not removed_nodes:
429                                 break
430                         removed_nodes.clear()
431
432         def _prevent_builddir_collisions(self):
433                 """
434                 When building stages, sometimes the same exact cpv needs to be merged
435                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
436                 in the builddir. Currently, normal file locks would be inappropriate
437                 for this purpose since emerge holds all of it's build dir locks from
438                 the main process.
439                 """
440                 cpv_map = {}
441                 for pkg in self._mergelist:
442                         if not isinstance(pkg, Package):
443                                 # a satisfied blocker
444                                 continue
445                         if pkg.installed:
446                                 continue
447                         if pkg.cpv not in cpv_map:
448                                 cpv_map[pkg.cpv] = [pkg]
449                                 continue
450                         for earlier_pkg in cpv_map[pkg.cpv]:
451                                 self._digraph.add(earlier_pkg, pkg,
452                                         priority=DepPriority(buildtime=True))
453                         cpv_map[pkg.cpv].append(pkg)
454
455         def _implicit_libc_deps(self):
456                 """
457                 Create implicit dependencies on libc, in order to ensure that libc
458                 is installed as early as possible (see bug #303567). If the merge
459                 list contains both a new-style virtual and an old-style PROVIDE
460                 virtual, the new-style virtual is used.
461                 """
462                 implicit_libc_roots = set([self._running_root.root])
463                 libc_set = InternalPackageSet([LIBC_PACKAGE_ATOM])
464                 norm_libc_pkgs = {}
465                 virt_libc_pkgs = {}
466                 for pkg in self._mergelist:
467                         if not isinstance(pkg, Package):
468                                 # a satisfied blocker
469                                 continue
470                         if pkg.installed:
471                                 continue
472                         if pkg.root in implicit_libc_roots and \
473                                 pkg.operation == 'merge':
474                                 if libc_set.findAtomForPackage(pkg):
475                                         if pkg.category == 'virtual':
476                                                 d = virt_libc_pkgs
477                                         else:
478                                                 d = norm_libc_pkgs
479                                         if pkg.root in d:
480                                                 raise AssertionError(
481                                                         "found 2 libc matches: %s and %s" % \
482                                                         (d[pkg.root], pkg))
483                                         d[pkg.root] = pkg
484
485                 # Prefer new-style virtuals over old-style PROVIDE virtuals.
486                 libc_pkg_map = norm_libc_pkgs.copy()
487                 libc_pkg_map.update(virt_libc_pkgs)
488
489                 # Only add a dep when the version changes.
490                 for libc_pkg in list(libc_pkg_map.values()):
491                         if libc_pkg.root_config.trees['vartree'].dbapi.cpv_exists(
492                                 libc_pkg.cpv):
493                                 del libc_pkg_map[pkg.root]
494
495                 if not libc_pkg_map:
496                         return
497
498                 libc_pkgs = set(libc_pkg_map.values())
499                 earlier_libc_pkgs = set()
500
501                 for pkg in self._mergelist:
502                         if not isinstance(pkg, Package):
503                                 # a satisfied blocker
504                                 continue
505                         if pkg.installed:
506                                 continue
507                         if pkg.root in implicit_libc_roots and \
508                                 pkg.operation == 'merge':
509                                 if pkg in libc_pkgs:
510                                         earlier_libc_pkgs.add(pkg)
511                                 else:
512                                         my_libc = libc_pkg_map.get(pkg.root)
513                                         if my_libc is not None and \
514                                                 my_libc in earlier_libc_pkgs:
515                                                 self._digraph.add(my_libc, pkg,
516                                                         priority=DepPriority(buildtime=True))
517
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		# Default exit status; overridden by the first positional
		# argument when one is given.
		status = 1
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			if pargs:
				self.status = pargs[0]
528
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		# addFront() runs this fetcher before anything already queued
		# (presumably ahead of background prefetchers — confirm).
		self._task_queues.fetch.addFront(fetcher)
535
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.add(setup_phase)
		# Kick the scheduler so the phase can start without waiting
		# for the next poll cycle.
		self._schedule()
543
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
550
	def _find_blockers(self, new_pkg):
		"""
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		"""
		def get_blockers():
			# The caller is assumed to hold the vdb lock here, so
			# acquire_lock=0 skips re-acquisition.
			return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
		return get_blockers
559
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""
		Find installed packages that block new_pkg and return a list of
		dblink instances for them, or None when blockers are being
		ignored (see _opts_ignore_blockers).
		"""
		if self._opts_ignore_blockers.intersection(self.myopts):
			return None

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		gc.collect()

		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# A package does not block a replacement within its own
			# slot, nor an identical re-merge of the same cpv.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
				continue
			if new_pkg.cpv == blocking_pkg.cpv:
				continue
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		gc.collect()

		return blocker_dblinks
586
587         def _dblink_pkg(self, pkg_dblink):
588                 cpv = pkg_dblink.mycpv
589                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
590                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
591                 installed = type_name == "installed"
592                 return self._pkg(cpv, type_name, root_config, installed=installed)
593
594         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
595
596                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
597                 out = StringIO()
598
599                 for msg in msgs:
600                         func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
601
602                 out_str = out.getvalue()
603
604                 self._task_output(out_str, log_path=log_path)
605
	def _dblink_emerge_log(self, msg):
		# Forward emerge-log messages from dblink to our logger.
		self._logger.log(msg)
608
609         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
610                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
611                 background = self._background
612
613                 if log_path is None:
614                         if not (background and level < logging.WARN):
615                                 portage.util.writemsg_level(msg,
616                                         level=level, noiselevel=noiselevel)
617                 else:
618                         self._task_output(msg, log_path=log_path)
619
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""

		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		# NOTE(review): pkg and log_path appear unused in this method.
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		# Hook phases run as plain bash functions via
		# MiscFunctionsProcess; everything else is a real ebuild phase.
		if phase in ('die_hooks', 'success_hooks'):
			ebuild_phase = MiscFunctionsProcess(background=background,
				commands=[phase], phase=phase,
				scheduler=scheduler, settings=settings)
		else:
			ebuild_phase = EbuildPhase(background=background,
				phase=phase, scheduler=scheduler,
				settings=settings)
		# Run synchronously from the caller's point of view; the
		# scheduler interface keeps servicing other tasks meanwhile.
		ebuild_phase.start()
		ebuild_phase.wait()

		return ebuild_phase.returncode
646
647         def _generate_digests(self):
648                 """
649                 Generate digests if necessary for --digests or FEATURES=digest.
650                 In order to avoid interference, this must done before parallel
651                 tasks are started.
652                 """
653
654                 if '--fetchonly' in self.myopts:
655                         return os.EX_OK
656
657                 digest = '--digest' in self.myopts
658                 if not digest:
659                         for pkgsettings in self.pkgsettings.values():
660                                 if pkgsettings.mycpv is not None:
661                                         # ensure that we are using global features
662                                         # settings rather than those from package.env
663                                         pkgsettings.reset()
664                                 if 'digest' in pkgsettings.features:
665                                         digest = True
666                                         break
667
668                 if not digest:
669                         return os.EX_OK
670
671                 for x in self._mergelist:
672                         if not isinstance(x, Package) or \
673                                 x.type_name != 'ebuild' or \
674                                 x.operation != 'merge':
675                                 continue
676                         pkgsettings = self.pkgsettings[x.root]
677                         if pkgsettings.mycpv is not None:
678                                 # ensure that we are using global features
679                                 # settings rather than those from package.env
680                                 pkgsettings.reset()
681                         if '--digest' not in self.myopts and \
682                                 'digest' not in pkgsettings.features:
683                                 continue
684                         portdb = x.root_config.trees['porttree'].dbapi
685                         ebuild_path = portdb.findname(x.cpv)
686                         if ebuild_path is None:
687                                 raise AssertionError("ebuild not found for '%s'" % x.cpv)
688                         pkgsettings['O'] = os.path.dirname(ebuild_path)
689                         if not digestgen(mysettings=pkgsettings, myportdb=portdb):
690                                 writemsg_level(
691                                         "!!! Unable to generate manifest for '%s'.\n" \
692                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
693                                 return 1
694
695                 return os.EX_OK
696
697         def _env_sanity_check(self):
698                 """
699                 Verify a sane environment before trying to build anything from source.
700                 """
701                 have_src_pkg = False
702                 for x in self._mergelist:
703                         if isinstance(x, Package) and not x.built:
704                                 have_src_pkg = True
705                                 break
706
707                 if not have_src_pkg:
708                         return os.EX_OK
709
710                 for settings in self.pkgsettings.values():
711                         for var in ("ARCH", ):
712                                 value = settings.get(var)
713                                 if value and value.strip():
714                                         continue
715                                 msg = _("%(var)s is not set... "
716                                         "Are you missing the '%(configroot)setc/make.profile' symlink? "
717                                         "Is the symlink correct? "
718                                         "Is your portage tree complete?") % \
719                                         {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
720
721                                 out = portage.output.EOutput()
722                                 for line in textwrap.wrap(msg, 70):
723                                         out.eerror(line)
724                                 return 1
725
726                 return os.EX_OK
727
728         def _check_manifests(self):
729                 # Verify all the manifests now so that the user is notified of failure
730                 # as soon as possible.
731                 if "strict" not in self.settings.features or \
732                         "--fetchonly" in self.myopts or \
733                         "--fetch-all-uri" in self.myopts:
734                         return os.EX_OK
735
736                 shown_verifying_msg = False
737                 quiet_settings = {}
738                 for myroot, pkgsettings in self.pkgsettings.items():
739                         quiet_config = portage.config(clone=pkgsettings)
740                         quiet_config["PORTAGE_QUIET"] = "1"
741                         quiet_config.backup_changes("PORTAGE_QUIET")
742                         quiet_settings[myroot] = quiet_config
743                         del quiet_config
744
745                 failures = 0
746
747                 for x in self._mergelist:
748                         if not isinstance(x, Package) or \
749                                 x.type_name != "ebuild":
750                                 continue
751
752                         if x.operation == "uninstall":
753                                 continue
754
755                         if not shown_verifying_msg:
756                                 shown_verifying_msg = True
757                                 self._status_msg("Verifying ebuild manifests")
758
759                         root_config = x.root_config
760                         portdb = root_config.trees["porttree"].dbapi
761                         quiet_config = quiet_settings[root_config.root]
762                         ebuild_path = portdb.findname(x.cpv)
763                         if ebuild_path is None:
764                                 raise AssertionError("ebuild not found for '%s'" % x.cpv)
765                         quiet_config["O"] = os.path.dirname(ebuild_path)
766                         if not digestcheck([], quiet_config, strict=True):
767                                 failures |= 1
768
769                 if failures:
770                         return 1
771                 return os.EX_OK
772
773         def _add_prefetchers(self):
774
775                 if not self._parallel_fetch:
776                         return
777
778                 if self._parallel_fetch:
779                         self._status_msg("Starting parallel fetch")
780
781                         prefetchers = self._prefetchers
782                         getbinpkg = "--getbinpkg" in self.myopts
783
784                         # In order to avoid "waiting for lock" messages
785                         # at the beginning, which annoy users, never
786                         # spawn a prefetcher for the first package.
787                         for pkg in self._mergelist[1:]:
788                                 # mergelist can contain solved Blocker instances
789                                 if not isinstance(pkg, Package) or pkg.operation == "uninstall":
790                                         continue
791                                 prefetcher = self._create_prefetcher(pkg)
792                                 if prefetcher is not None:
793                                         self._task_queues.fetch.add(prefetcher)
794                                         prefetchers[pkg] = prefetcher
795
796         def _create_prefetcher(self, pkg):
797                 """
798                 @return: a prefetcher, or None if not applicable
799                 """
800                 prefetcher = None
801
802                 if not isinstance(pkg, Package):
803                         pass
804
805                 elif pkg.type_name == "ebuild":
806
807                         prefetcher = EbuildFetcher(background=True,
808                                 config_pool=self._ConfigPool(pkg.root,
809                                 self._allocate_config, self._deallocate_config),
810                                 fetchonly=1, logfile=self._fetch_log,
811                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
812
813                 elif pkg.type_name == "binary" and \
814                         "--getbinpkg" in self.myopts and \
815                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
816
817                         prefetcher = BinpkgPrefetcher(background=True,
818                                 pkg=pkg, scheduler=self._sched_iface)
819
820                 return prefetcher
821
822         def _is_restart_scheduled(self):
823                 """
824                 Check if the merge list contains a replacement
825                 for the current running instance, that will result
826                 in restart after merge.
827                 @rtype: bool
828                 @returns: True if a restart is scheduled, False otherwise.
829                 """
830                 if self._opts_no_restart.intersection(self.myopts):
831                         return False
832
833                 mergelist = self._mergelist
834
835                 for i, pkg in enumerate(mergelist):
836                         if self._is_restart_necessary(pkg) and \
837                                 i != len(mergelist) - 1:
838                                 return True
839
840                 return False
841
842         def _is_restart_necessary(self, pkg):
843                 """
844                 @return: True if merging the given package
845                         requires restart, False otherwise.
846                 """
847
848                 # Figure out if we need a restart.
849                 if pkg.root == self._running_root.root and \
850                         portage.match_from_list(
851                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
852                         if self._running_portage is None:
853                                 return True
854                         elif pkg.cpv != self._running_portage.cpv or \
855                                 '9999' in pkg.cpv or \
856                                 'git' in pkg.inherited:
857                                 return True
858                 return False
859
        def _restart_if_necessary(self, pkg):
                """
                Use execv() to restart emerge. This happens
                if portage upgrades itself and there are
                remaining packages in the list.

                @param pkg: the package that was just merged; a restart is
                        only performed when it is a portage replacement and
                        is not the last entry in the merge list.
                """

                if self._opts_no_restart.intersection(self.myopts):
                        return

                if not self._is_restart_necessary(pkg):
                        return

                # Nothing left to merge after this one, so a restart
                # would be pointless.
                if pkg == self._mergelist[-1]:
                        return

                self._main_loop_cleanup()

                logger = self._logger
                pkg_count = self._pkg_count
                mtimedb = self._mtimedb
                bad_resume_opts = self._bad_resume_opts

                logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
                        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

                logger.log(" *** RESTARTING " + \
                        "emerge via exec() after change of " + \
                        "portage version.")

                # Drop the just-merged package from the resume list so the
                # restarted instance does not try to merge it again.
                mtimedb["resume"]["mergelist"].remove(list(pkg))
                mtimedb.commit()
                portage.run_exitfuncs()
                # Don't trust sys.argv[0] here because eselect-python may modify it.
                emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
                mynewargv = [emerge_binary, "--resume"]
                resume_opts = self.myopts.copy()
                # For automatic resume, we need to prevent
                # any of bad_resume_opts from leaking in
                # via EMERGE_DEFAULT_OPTS.
                resume_opts["--ignore-default-opts"] = True
                for myopt, myarg in resume_opts.items():
                        if myopt not in bad_resume_opts:
                                if myarg is True:
                                        # Flag options carry no argument.
                                        mynewargv.append(myopt)
                                else:
                                        mynewargv.append(myopt +"="+ str(myarg))
                # priority only needs to be adjusted on the first run
                os.environ["PORTAGE_NICENESS"] = "0"
                os.execv(mynewargv[0], mynewargv)
910
911         def _run_pkg_pretend(self):
912                 shown_verifying_msg = False
913
914                 failures = 0
915
916                 for x in self._mergelist:
917                         if not isinstance(x, Package):
918                                 continue
919
920                         if x.operation == "uninstall":
921                                 continue
922
923                         if x.metadata["EAPI"] in ("0", "1", "2", "3"):
924                                 continue
925
926                         if "pretend" not in x.metadata.defined_phases:
927                                 continue
928
929                         if not shown_verifying_msg and self._background:
930                                 shown_verifying_msg = True
931                                 self._status_msg("Running pre-merge checks")
932
933                         if not self._background:
934                                 out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
935                                 portage.util.writemsg_stdout(out_str, noiselevel=-1)
936
937                         root_config = x.root_config
938                         settings = self.pkgsettings[root_config.root]
939                         tmpdir = tempfile.mkdtemp()
940                         tmpdir_orig = settings["PORTAGE_TMPDIR"]
941                         settings["PORTAGE_TMPDIR"] = tmpdir
942
943                         if x.built:
944                                 tree = "bintree"
945                                 bintree = root_config.trees["bintree"].dbapi.bintree
946                                 if bintree.isremote(x.cpv):
947                                         fetcher = BinpkgPrefetcher(background=self._background,
948                                                 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=x, scheduler=self._sched_iface)
949                                         fetcher.start()
950                                         fetcher.wait()
951
952                                 tbz2_file = bintree.getname(x.cpv)
953                                 infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
954                                 os.makedirs(infloc)
955                                 portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
956                                 ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
957
958                         else:
959                                 tree = "porttree"
960                                 portdb = root_config.trees["porttree"].dbapi
961                                 ebuild_path = portdb.findname(x.cpv)
962                                 if ebuild_path is None:
963                                         raise AssertionError("ebuild not found for '%s'" % x.cpv)
964
965                         portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
966                                 "pretend", root_config.root, settings,
967                                 debug=(settings.get("PORTAGE_DEBUG", "") == 1),
968                                 mydbapi=self.trees[settings["ROOT"]][tree].dbapi, use_cache=1)
969                         prepare_build_dirs(root_config.root, settings, cleanup=0)
970
971                         vardb = root_config.trees['vartree'].dbapi
972                         settings["REPLACING_VERSIONS"] = " ".join(
973                                 set(portage.versions.cpv_getversion(match) \
974                                         for match in vardb.match(x.slot_atom) + \
975                                         vardb.match('='+x.cpv)))
976                         pretend_phase = EbuildPhase(background=self._background,
977                                 phase="pretend", scheduler=self._sched_iface,
978                                 settings=settings)
979
980                         pretend_phase.start()
981                         ret = pretend_phase.wait()
982
983                         portage.elog.elog_process(x.cpv, settings)
984
985                         if ret == os.EX_OK:
986                                 shutil.rmtree(tmpdir)
987                         settings["PORTAGE_TMPDIR"] = tmpdir_orig
988
989                         if ret != os.EX_OK:
990                                 failures += 1
991                 if failures:
992                         return 1
993                 return os.EX_OK
994
        def merge(self):
                """
                Top-level merge driver: validate the environment, generate
                and verify digests, run pkg_pretend phases, then run the
                merge loop (retrying via a recalculated resume list when
                --keep-going is enabled) and finally report any failures.

                @returns: os.EX_OK on success, 1 (or another nonzero rval
                        from a pre-merge check) on failure.
                """
                if "--resume" in self.myopts:
                        # We're resuming.
                        portage.writemsg_stdout(
                                colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
                        self._logger.log(" *** Resuming merge...")

                self._save_resume_list()

                try:
                        self._background = self._background_mode()
                except self._unknown_internal_error:
                        return 1

                for root in self.trees:
                        root_config = self.trees[root]["root_config"]

                        # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
                        # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
                        # for ensuring sane $PWD (bug #239560) and storing elog messages.
                        tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
                        if not tmpdir or not os.path.isdir(tmpdir):
                                msg = "The directory specified in your " + \
                                        "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
                                        "does not exist. Please create this " + \
                                        "directory or correct your PORTAGE_TMPDIR setting."
                                msg = textwrap.wrap(msg, 70)
                                out = portage.output.EOutput()
                                for l in msg:
                                        out.eerror(l)
                                return 1

                        if self._background:
                                # Settings are locked by default; unlock
                                # temporarily to record the background flag
                                # for spawned ebuild phases.
                                root_config.settings.unlock()
                                root_config.settings["PORTAGE_BACKGROUND"] = "1"
                                root_config.settings.backup_changes("PORTAGE_BACKGROUND")
                                root_config.settings.lock()

                        self.pkgsettings[root] = portage.config(
                                clone=root_config.settings)

                keep_going = "--keep-going" in self.myopts
                fetchonly = self._build_opts.fetchonly
                mtimedb = self._mtimedb
                failed_pkgs = self._failed_pkgs

                rval = self._generate_digests()
                if rval != os.EX_OK:
                        return rval

                rval = self._env_sanity_check()
                if rval != os.EX_OK:
                        return rval

                # TODO: Immediately recalculate deps here if --keep-going
                #       is enabled and corrupt manifests are detected.
                rval = self._check_manifests()
                if rval != os.EX_OK and not keep_going:
                        return rval

                rval = self._run_pkg_pretend()
                if rval != os.EX_OK:
                        return rval

                # Merge loop: with --keep-going, drop failed packages from
                # the resume list and recalculate the remaining deps.
                while True:
                        rval = self._merge()
                        if rval == os.EX_OK or fetchonly or not keep_going:
                                break
                        if "resume" not in mtimedb:
                                break
                        mergelist = self._mtimedb["resume"].get("mergelist")
                        if not mergelist:
                                break

                        if not failed_pkgs:
                                break

                        for failed_pkg in failed_pkgs:
                                mergelist.remove(list(failed_pkg.pkg))

                        self._failed_pkgs_all.extend(failed_pkgs)
                        del failed_pkgs[:]

                        if not mergelist:
                                break

                        # free some memory before creating
                        # the resume depgraph
                        self._destroy_installed_graph()

                        if not self._calc_resume_list():
                                break

                        clear_caches(self.trees)
                        if not self._mergelist:
                                break

                        # Initialize the installed graph again
                        # since it was destroyed above in order
                        # to free memory.
                        self._init_installed_graph()
                        self._save_resume_list()
                        self._pkg_count.curval = 0
                        self._pkg_count.maxval = len([x for x in self._mergelist \
                                if isinstance(x, Package) and x.operation == "merge"])
                        self._status_display.maxval = self._pkg_count.maxval

                self._logger.log(" *** Finished. Cleaning up...")

                if failed_pkgs:
                        self._failed_pkgs_all.extend(failed_pkgs)
                        del failed_pkgs[:]

                printer = portage.output.EOutput()
                background = self._background
                failure_log_shown = False
                if background and len(self._failed_pkgs_all) == 1:
                        # If only one package failed then just show its
                        # whole log for easy viewing.
                        failed_pkg = self._failed_pkgs_all[-1]
                        build_dir = failed_pkg.build_dir
                        log_file = None

                        log_paths = [failed_pkg.build_log]

                        log_path = self._locate_failure_log(failed_pkg)
                        if log_path is not None:
                                try:
                                        log_file = open(_unicode_encode(log_path,
                                                encoding=_encodings['fs'], errors='strict'), mode='rb')
                                except IOError:
                                        pass
                                else:
                                        if log_path.endswith('.gz'):
                                                # Build logs may be compressed on the fly.
                                                log_file =  gzip.GzipFile(filename='',
                                                        mode='rb', fileobj=log_file)

                        if log_file is not None:
                                try:
                                        for line in log_file:
                                                writemsg_level(line, noiselevel=-1)
                                except zlib.error as e:
                                        # Truncated or corrupt gzip data.
                                        writemsg_level("%s\n" % (e,), level=logging.ERROR,
                                                noiselevel=-1)
                                finally:
                                        log_file.close()
                                failure_log_shown = True

                # Dump mod_echo output now since it tends to flood the terminal.
                # This allows us to avoid having more important output, generated
                # later, from being swept away by the mod_echo output.
                mod_echo_output =  _flush_elog_mod_echo()

                if background and not failure_log_shown and \
                        self._failed_pkgs_all and \
                        self._failed_pkgs_die_msgs and \
                        not mod_echo_output:

                        for mysettings, key, logentries in self._failed_pkgs_die_msgs:
                                root_msg = ""
                                if mysettings["ROOT"] != "/":
                                        root_msg = " merged to %s" % mysettings["ROOT"]
                                print()
                                printer.einfo("Error messages for package %s%s:" % \
                                        (colorize("INFORM", key), root_msg))
                                print()
                                for phase in portage.const.EBUILD_PHASES:
                                        if phase not in logentries:
                                                continue
                                        for msgtype, msgcontent in logentries[phase]:
                                                # msgcontent may be a single string or
                                                # a list of strings.
                                                if isinstance(msgcontent, basestring):
                                                        msgcontent = [msgcontent]
                                                for line in msgcontent:
                                                        printer.eerror(line.strip("\n"))

                if self._post_mod_echo_msgs:
                        for msg in self._post_mod_echo_msgs:
                                msg()

                if len(self._failed_pkgs_all) > 1 or \
                        (self._failed_pkgs_all and keep_going):
                        if len(self._failed_pkgs_all) > 1:
                                msg = "The following %d packages have " % \
                                        len(self._failed_pkgs_all) + \
                                        "failed to build or install:"
                        else:
                                msg = "The following package has " + \
                                        "failed to build or install:"

                        printer.eerror("")
                        for line in textwrap.wrap(msg, 72):
                                printer.eerror(line)
                        printer.eerror("")
                        for failed_pkg in self._failed_pkgs_all:
                                msg = " %s" % (colorize('INFORM', failed_pkg.pkg.__str__()),)
                                log_path = self._locate_failure_log(failed_pkg)
                                if log_path is not None:
                                        msg += ", Log file:"
                                printer.eerror(msg)
                                if log_path is not None:
                                        printer.eerror("  '%s'" % colorize('INFORM', log_path))
                        printer.eerror("")

                if self._failed_pkgs_all:
                        return 1
                return os.EX_OK
1201
1202         def _elog_listener(self, mysettings, key, logentries, fulltext):
1203                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
1204                 if errors:
1205                         self._failed_pkgs_die_msgs.append(
1206                                 (mysettings, key, errors))
1207
1208         def _locate_failure_log(self, failed_pkg):
1209
1210                 build_dir = failed_pkg.build_dir
1211                 log_file = None
1212
1213                 log_paths = [failed_pkg.build_log]
1214
1215                 for log_path in log_paths:
1216                         if not log_path:
1217                                 continue
1218
1219                         try:
1220                                 log_size = os.stat(log_path).st_size
1221                         except OSError:
1222                                 continue
1223
1224                         if log_size == 0:
1225                                 continue
1226
1227                         return log_path
1228
1229                 return None
1230
1231         def _add_packages(self):
1232                 pkg_queue = self._pkg_queue
1233                 for pkg in self._mergelist:
1234                         if isinstance(pkg, Package):
1235                                 pkg_queue.append(pkg)
1236                         elif isinstance(pkg, Blocker):
1237                                 pass
1238
1239         def _system_merge_started(self, merge):
1240                 """
1241                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1242                 """
1243                 graph = self._digraph
1244                 if graph is None:
1245                         return
1246                 pkg = merge.merge.pkg
1247
1248                 # Skip this if $ROOT != / since it shouldn't matter if there
1249                 # are unsatisfied system runtime deps in this case.
1250                 if pkg.root != '/':
1251                         return
1252
1253                 completed_tasks = self._completed_tasks
1254                 unsatisfied = self._unsatisfied_system_deps
1255
1256                 def ignore_non_runtime_or_satisfied(priority):
1257                         """
1258                         Ignore non-runtime and satisfied runtime priorities.
1259                         """
1260                         if isinstance(priority, DepPriority) and \
1261                                 not priority.satisfied and \
1262                                 (priority.runtime or priority.runtime_post):
1263                                 return False
1264                         return True
1265
1266                 # When checking for unsatisfied runtime deps, only check
1267                 # direct deps since indirect deps are checked when the
1268                 # corresponding parent is merged.
1269                 for child in graph.child_nodes(pkg,
1270                         ignore_priority=ignore_non_runtime_or_satisfied):
1271                         if not isinstance(child, Package) or \
1272                                 child.operation == 'uninstall':
1273                                 continue
1274                         if child is pkg:
1275                                 continue
1276                         if child.operation == 'merge' and \
1277                                 child not in completed_tasks:
1278                                 unsatisfied.add(child)
1279
	def _merge_wait_exit_handler(self, task):
		# Exit listener for merges that were scheduled through
		# _merge_wait_queue: drop the task from the scheduled set
		# first, then run the normal merge-exit bookkeeping.
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
1283
1284         def _merge_exit(self, merge):
1285                 self._do_merge_exit(merge)
1286                 self._deallocate_config(merge.merge.settings)
1287                 if merge.returncode == os.EX_OK and \
1288                         not merge.merge.pkg.installed:
1289                         self._status_display.curval += 1
1290                 self._status_display.merges = len(self._task_queues.merge)
1291                 self._schedule()
1292
	def _do_merge_exit(self, merge):
		"""
		Handle the result of a completed PackageMerge task: on failure,
		record a _failed_pkg entry and report it; on success, mark the
		task (and any replaced package's uninstall task) complete and
		update the persistent resume list.
		"""
		pkg = merge.merge.pkg
		self._running_tasks.remove(pkg)
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=pkg,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)
			return

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		if pkg.installed:
			return

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()
1332
	def _build_exit(self, build):
		"""
		Exit listener for build jobs: on success queue the resulting
		merge (held back in _merge_wait_queue for deep system deps),
		otherwise record the failure. Either way the job slot is
		released and scheduling resumes.
		"""
		if build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			self._running_tasks.remove(build.pkg)
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=build.pkg,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()
1364
	def _extract_exit(self, build):
		# Exit listener for binary package extraction; the bookkeeping
		# is identical to a regular build, so just delegate.
		self._build_exit(build)
1367
1368         def _task_complete(self, pkg):
1369                 self._completed_tasks.add(pkg)
1370                 self._unsatisfied_system_deps.discard(pkg)
1371                 self._choose_pkg_return_early = False
1372
	def _merge(self):
		"""
		Run the main merge loop over the package queue and return the
		returncode of the last failed package, or os.EX_OK on success.
		"""

		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Quiet lock messages and capture elog output while the
		# scheduler owns the terminal.
		portage.locks._quiet = self._background
		portage.elog.add_listener(self._elog_listener)
		rval = os.EX_OK

		try:
			self._main_loop()
		finally:
			# Always restore global portage state, even when the
			# loop is interrupted by an exception.
			self._main_loop_cleanup()
			portage.locks._quiet = False
			portage.elog.remove_listener(self._elog_listener)
			if failed_pkgs:
				rval = failed_pkgs[-1].returncode

		return rval
1393
1394         def _main_loop_cleanup(self):
1395                 del self._pkg_queue[:]
1396                 self._completed_tasks.clear()
1397                 self._deep_system_deps.clear()
1398                 self._unsatisfied_system_deps.clear()
1399                 self._choose_pkg_return_early = False
1400                 self._status_display.reset()
1401                 self._digraph = None
1402                 self._task_queues.fetch.clear()
1403
1404         def _choose_pkg(self):
1405                 """
1406                 Choose a task that has all it's dependencies satisfied.
1407                 """
1408
1409                 if self._choose_pkg_return_early:
1410                         return None
1411
1412                 if self._digraph is None:
1413                         if self._is_work_scheduled() and \
1414                                 not ("--nodeps" in self.myopts and \
1415                                 (self._max_jobs is True or self._max_jobs > 1)):
1416                                 self._choose_pkg_return_early = True
1417                                 return None
1418                         return self._pkg_queue.pop(0)
1419
1420                 if not self._is_work_scheduled():
1421                         return self._pkg_queue.pop(0)
1422
1423                 self._prune_digraph()
1424
1425                 chosen_pkg = None
1426
1427                 # Prefer uninstall operations when available.
1428                 graph = self._digraph
1429                 for pkg in self._pkg_queue:
1430                         if pkg.operation == 'uninstall' and \
1431                                 not graph.child_nodes(pkg):
1432                                 chosen_pkg = pkg
1433                                 break
1434
1435                 if chosen_pkg is None:
1436                         later = set(self._pkg_queue)
1437                         for pkg in self._pkg_queue:
1438                                 later.remove(pkg)
1439                                 if not self._dependent_on_scheduled_merges(pkg, later):
1440                                         chosen_pkg = pkg
1441                                         break
1442
1443                 if chosen_pkg is not None:
1444                         self._pkg_queue.remove(chosen_pkg)
1445
1446                 if chosen_pkg is None:
1447                         # There's no point in searching for a package to
1448                         # choose until at least one of the existing jobs
1449                         # completes.
1450                         self._choose_pkg_return_early = True
1451
1452                 return chosen_pkg
1453
1454         def _dependent_on_scheduled_merges(self, pkg, later):
1455                 """
1456                 Traverse the subgraph of the given packages deep dependencies
1457                 to see if it contains any scheduled merges.
1458                 @param pkg: a package to check dependencies for
1459                 @type pkg: Package
1460                 @param later: packages for which dependence should be ignored
1461                         since they will be merged later than pkg anyway and therefore
1462                         delaying the merge of pkg will not result in a more optimal
1463                         merge order
1464                 @type later: set
1465                 @rtype: bool
1466                 @returns: True if the package is dependent, False otherwise.
1467                 """
1468
1469                 graph = self._digraph
1470                 completed_tasks = self._completed_tasks
1471
1472                 dependent = False
1473                 traversed_nodes = set([pkg])
1474                 direct_deps = graph.child_nodes(pkg)
1475                 node_stack = direct_deps
1476                 direct_deps = frozenset(direct_deps)
1477                 while node_stack:
1478                         node = node_stack.pop()
1479                         if node in traversed_nodes:
1480                                 continue
1481                         traversed_nodes.add(node)
1482                         if not ((node.installed and node.operation == "nomerge") or \
1483                                 (node.operation == "uninstall" and \
1484                                 node not in direct_deps) or \
1485                                 node in completed_tasks or \
1486                                 node in later):
1487                                 dependent = True
1488                                 break
1489
1490                         # Don't traverse children of uninstall nodes since
1491                         # those aren't dependencies in the usual sense.
1492                         if node.operation != "uninstall":
1493                                 node_stack.extend(graph.child_nodes(node))
1494
1495                 return dependent
1496
1497         def _allocate_config(self, root):
1498                 """
1499                 Allocate a unique config instance for a task in order
1500                 to prevent interference between parallel tasks.
1501                 """
1502                 if self._config_pool[root]:
1503                         temp_settings = self._config_pool[root].pop()
1504                 else:
1505                         temp_settings = portage.config(clone=self.pkgsettings[root])
1506                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1507                 # performance reasons, call it here to make sure all settings from the
1508                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1509                 temp_settings.reload()
1510                 temp_settings.reset()
1511                 return temp_settings
1512
	def _deallocate_config(self, settings):
		# Return a config instance to the per-root pool so that
		# _allocate_config() can reuse it for a later task.
		self._config_pool[settings["ROOT"]].append(settings)
1515
	def _main_loop(self):
		# Drive scheduling and the poll loop until all queued and
		# running work is finished.

		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		# Schedule as long as _schedule() reports that work remains,
		# servicing poll events between passes.
		while self._schedule():
			if self._poll_event_handlers:
				self._poll_loop()

		# Drain phase: keep scheduling until no tasks are running.
		while True:
			self._schedule()
			if not self._is_work_scheduled():
				break
			if self._poll_event_handlers:
				self._poll_loop()
1534
1535         def _keep_scheduling(self):
1536                 return bool(self._pkg_queue and \
1537                         not (self._failed_pkgs and not self._build_opts.fetchonly))
1538
	def _is_work_scheduled(self):
		# True while any build/merge/uninstall task is in flight.
		return bool(self._running_tasks)
1541
	def _schedule_tasks(self):
		"""
		Run one scheduling pass: flush waiting merges once all jobs
		have finished, start new jobs, and pump the task queues until
		no more state changes occur. Returns True while scheduling
		should continue (see _keep_scheduling).
		"""

		while True:

			# When the number of jobs drops to zero, process all waiting merges.
			if not self._jobs and self._merge_wait_queue:
				for task in self._merge_wait_queue:
					task.addExitListener(self._merge_wait_exit_handler)
					self._task_queues.merge.add(task)
				self._status_display.merges = len(self._task_queues.merge)
				self._merge_wait_scheduled.extend(self._merge_wait_queue)
				del self._merge_wait_queue[:]

			self._schedule_tasks_imp()
			self._status_display.display()

			# Pump every queue; count how many made progress.
			state_change = 0
			for q in self._task_queues.values():
				if q.schedule():
					state_change += 1

			# Cancel prefetchers if they're the only reason
			# the main poll loop is still running.
			if self._failed_pkgs and not self._build_opts.fetchonly and \
				not self._is_work_scheduled() and \
				self._task_queues.fetch:
				self._task_queues.fetch.clear()
				state_change += 1

			# Loop again only while something changed or merges are
			# still waiting to be flushed.
			if not (state_change or \
				(not self._jobs and self._merge_wait_queue)):
				break

		return self._keep_scheduling()
1576
1577         def _job_delay(self):
1578                 """
1579                 @rtype: bool
1580                 @returns: True if job scheduling should be delayed, False otherwise.
1581                 """
1582
1583                 if self._jobs and self._max_load is not None:
1584
1585                         current_time = time.time()
1586
1587                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1588                         if delay > self._job_delay_max:
1589                                 delay = self._job_delay_max
1590                         if (current_time - self._previous_job_start_time) < delay:
1591                                 return True
1592
1593                 return False
1594
1595         def _schedule_tasks_imp(self):
1596                 """
1597                 @rtype: bool
1598                 @returns: True if state changed, False otherwise.
1599                 """
1600
1601                 state_change = 0
1602
1603                 while True:
1604
1605                         if not self._keep_scheduling():
1606                                 return bool(state_change)
1607
1608                         if self._choose_pkg_return_early or \
1609                                 self._merge_wait_scheduled or \
1610                                 (self._jobs and self._unsatisfied_system_deps) or \
1611                                 not self._can_add_job() or \
1612                                 self._job_delay():
1613                                 return bool(state_change)
1614
1615                         pkg = self._choose_pkg()
1616                         if pkg is None:
1617                                 return bool(state_change)
1618
1619                         state_change += 1
1620
1621                         if not pkg.installed:
1622                                 self._pkg_count.curval += 1
1623
1624                         task = self._task(pkg)
1625                         self._running_tasks.add(pkg)
1626
1627                         if pkg.installed:
1628                                 merge = PackageMerge(merge=task)
1629                                 merge.addExitListener(self._merge_exit)
1630                                 self._task_queues.merge.addFront(merge)
1631
1632                         elif pkg.built:
1633                                 self._jobs += 1
1634                                 self._previous_job_start_time = time.time()
1635                                 self._status_display.running = self._jobs
1636                                 task.addExitListener(self._extract_exit)
1637                                 self._task_queues.jobs.add(task)
1638
1639                         else:
1640                                 self._jobs += 1
1641                                 self._previous_job_start_time = time.time()
1642                                 self._status_display.running = self._jobs
1643                                 task.addExitListener(self._build_exit)
1644                                 self._task_queues.jobs.add(task)
1645
1646                 return bool(state_change)
1647
	def _task(self, pkg):
		"""
		Build a MergeListItem task for pkg, looking up the currently
		installed package in the same slot (if any) so it can be
		recorded as the package being replaced.
		"""

		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
			if previous_cpv:
				# Use the last match from the installed-package db.
				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True)

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)

		return task
1675
1676         def _failed_pkg_msg(self, failed_pkg, action, preposition):
1677                 pkg = failed_pkg.pkg
1678                 msg = "%s to %s %s" % \
1679                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
1680                 if pkg.root != "/":
1681                         msg += " %s %s" % (preposition, pkg.root)
1682
1683                 log_path = self._locate_failure_log(failed_pkg)
1684                 if log_path is not None:
1685                         msg += ", Log file:"
1686                 self._status_msg(msg)
1687
1688                 if log_path is not None:
1689                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1690
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating \r and \n control
		characters, to guarantee that lines are created or erased when
		necessary and appropriate.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		# In foreground mode, emit a leading newline so the message is
		# separated from any in-progress output line.
		if not self._background:
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
1705
1706         def _save_resume_list(self):
1707                 """
1708                 Do this before verifying the ebuild Manifests since it might
1709                 be possible for the user to use --resume --skipfirst get past
1710                 a non-essential package with a broken digest.
1711                 """
1712                 mtimedb = self._mtimedb
1713
1714                 mtimedb["resume"] = {}
1715                 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1716                 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1717                 # a list type for options.
1718                 mtimedb["resume"]["myopts"] = self.myopts.copy()
1719
1720                 # Convert Atom instances to plain str.
1721                 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
1722                 mtimedb["resume"]["mergelist"] = [list(x) \
1723                         for x in self._mergelist \
1724                         if isinstance(x, Package) and x.operation == "merge"]
1725
1726                 mtimedb.commit()
1727
	def _calc_resume_list(self):
		"""
		Use the current resume list to calculate a new one,
		dropping any packages with unsatisfied deps.
		@rtype: bool
		@returns: True if successful, False otherwise.
		"""
		print(colorize("GOOD", "*** Resuming merge..."))

		myparams = create_depgraph_params(self.myopts, None)
		success = False
		e = None
		try:
			success, mydepgraph, dropped_tasks = resume_depgraph(
				self.settings, self.trees, self._mtimedb, self.myopts,
				myparams, self._spinner)
		except depgraph.UnsatisfiedResumeDep as exc:
			# rename variable to avoid python-3.0 error:
			# SyntaxError: can not delete variable 'e' referenced in nested
			#              scope
			e = exc
			mydepgraph = e.depgraph
			dropped_tasks = set()

		if e is not None:
			# Defer the error report until after elog output is flushed
			# (see _post_mod_echo_msgs consumers).
			def unsatisfied_resume_dep_msg():
				mydepgraph.display_problems()
				out = portage.output.EOutput()
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")
				out.eerror("")
				indent = "  "
				show_parents = set()
				for dep in e.value:
					if dep.parent in show_parents:
						continue
					show_parents.add(dep.parent)
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
					else:
						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in textwrap.wrap(msg, 72):
					out.eerror(line)
			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
			return False

		if success and self._show_list():
			mylist = mydepgraph.altlist()
			if mylist:
				if "--tree" in self.myopts:
					mylist.reverse()
				mydepgraph.display(mylist, favorites=self._favorites)

		if not success:
			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
			return False
		mydepgraph.display_problems()

		# Adopt the recalculated merge list and dependency graph.
		mylist = mydepgraph.altlist()
		mydepgraph.break_refs(mylist)
		mydepgraph.break_refs(dropped_tasks)
		self._mergelist = mylist
		self._set_digraph(mydepgraph.schedulerGraph())

		# Report each dropped merge via elog and record it as failed.
		msg_width = 75
		for task in dropped_tasks:
			if not (isinstance(task, Package) and task.operation == "merge"):
				continue
			pkg = task
			msg = "emerge --keep-going:" + \
				" %s" % (pkg.cpv,)
			if pkg.root != "/":
				msg += " for %s" % (pkg.root,)
			msg += " dropped due to unsatisfied dependency."
			for line in textwrap.wrap(msg, msg_width):
				eerror(line, phase="other", key=pkg.cpv)
			settings = self.pkgsettings[pkg.root]
			# Ensure that log collection from $T is disabled inside
			# elog_process(), since any logs that might exist are
			# not valid here.
			settings.pop("T", None)
			portage.elog.elog_process(pkg.cpv, settings)
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

		return True
1825
1826         def _show_list(self):
1827                 myopts = self.myopts
1828                 if "--quiet" not in myopts and \
1829                         ("--ask" in myopts or "--tree" in myopts or \
1830                         "--verbose" in myopts):
1831                         return True
1832                 return False
1833
	def _world_atom(self, pkg):
		"""
		Add or remove the package to the world file, but only if
		it's supposed to be added or removed. Otherwise, do nothing.
		"""

		# Options under which the world file must never be modified.
		if set(("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			return

		if pkg.root != self.target_root:
			return

		# Only packages explicitly requested on the command line
		# (or via sets) belong in the world file.
		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			return

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["selected"]
		# The world set implementation may not support locking
		# (hence the hasattr checks below).
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		try:
			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			if pkg.operation == "uninstall":
				if hasattr(world_set, "cleanPackage"):
					world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
							pkg.cpv)
				if hasattr(world_set, "remove"):
					for s in pkg.root_config.setconfig.active:
						world_set.remove(SETPREFIX+s)
			else:
				atom = create_world_atom(pkg, args_set, root_config)
				if atom:
					if hasattr(world_set, "add"):
						self._status_msg(('Recording %s in "world" ' + \
							'favorites file...') % atom)
						logger.log(" === (%s of %s) Updating world file (%s)" % \
							(pkg_count.curval, pkg_count.maxval, pkg.cpv))
						world_set.add(atom)
					else:
						writemsg_level('\n!!! Unable to record %s in "world"\n' % \
							(atom,), level=logging.WARN, noiselevel=-1)
		finally:
			# Release the lock even if an update above failed.
			if world_locked:
				world_set.unlock()
1888
1889         def _pkg(self, cpv, type_name, root_config, installed=False):
1890                 """
1891                 Get a package instance from the cache, or create a new
1892                 one if necessary. Raises KeyError from aux_get if it
1893                 failures for some reason (package does not exist or is
1894                 corrupt).
1895                 """
1896                 operation = "merge"
1897                 if installed:
1898                         operation = "nomerge"
1899
1900                 if self._digraph is not None:
1901                         # Reuse existing instance when available.
1902                         pkg = self._digraph.get(
1903                                 (type_name, root_config.root, cpv, operation))
1904                         if pkg is not None:
1905                                 return pkg
1906
1907                 tree_type = depgraph.pkg_tree_map[type_name]
1908                 db = root_config.trees[tree_type].dbapi
1909                 db_keys = list(self.trees[root_config.root][
1910                         tree_type].dbapi._aux_cache_keys)
1911                 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
1912                 return Package(built=(type_name != 'ebuild'),
1913                         cpv=cpv, metadata=metadata,
1914                         root_config=root_config, installed=installed)