Scheduler: Always display message for pkg_pretend
[portage.git] / pym / _emerge / Scheduler.py
1 # Copyright 1999-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
3
4 from __future__ import print_function
5
6 import gc
7 import gzip
8 import logging
9 import shutil
10 import sys
11 import tempfile
12 import textwrap
13 import time
14 import warnings
15 import weakref
16 import zlib
17
18 import portage
19 from portage import StringIO
20 from portage import os
21 from portage import _encodings
22 from portage import _unicode_decode, _unicode_encode
23 from portage.cache.mappings import slot_dict_class
24 from portage.const import LIBC_PACKAGE_ATOM
25 from portage.elog.messages import eerror
26 from portage.localization import _
27 from portage.output import colorize, create_color_func, red
28 bad = create_color_func("BAD")
29 from portage._sets import SETPREFIX
30 from portage._sets.base import InternalPackageSet
31 from portage.util import writemsg, writemsg_level
32 from portage.package.ebuild.digestcheck import digestcheck
33 from portage.package.ebuild.digestgen import digestgen
34 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
35
36 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
37 from _emerge.Blocker import Blocker
38 from _emerge.BlockerDB import BlockerDB
39 from _emerge.clear_caches import clear_caches
40 from _emerge.create_depgraph_params import create_depgraph_params
41 from _emerge.create_world_atom import create_world_atom
42 from _emerge.DepPriority import DepPriority
43 from _emerge.depgraph import depgraph, resume_depgraph
44 from _emerge.EbuildFetcher import EbuildFetcher
45 from _emerge.EbuildPhase import EbuildPhase
46 from _emerge.emergelog import emergelog, _emerge_log_dir
47 from _emerge.FakeVartree import FakeVartree
48 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
49 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
50 from _emerge.JobStatusDisplay import JobStatusDisplay
51 from _emerge.MergeListItem import MergeListItem
52 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
53 from _emerge.Package import Package
54 from _emerge.PackageMerge import PackageMerge
55 from _emerge.PollScheduler import PollScheduler
56 from _emerge.RootConfig import RootConfig
57 from _emerge.SlotObject import SlotObject
58 from _emerge.SequentialTaskQueue import SequentialTaskQueue
59
if sys.hexversion >= 0x3000000:
	# Python 3 removed basestring; alias it so shared py2/py3 code that
	# references basestring keeps working.
	basestring = str

class Scheduler(PollScheduler):

	# max time between display status updates (milliseconds)
	_max_display_latency = 3000

	# With any of these options present, installed-package blockers are
	# not computed (see _find_blockers_with_lock, which returns early).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground operation regardless of --jobs or
	# --quiet (consulted by _background_mode).
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# NOTE(review): consumers of the next two sets are outside this
	# chunk; the names suggest "options that suppress auto-restart" and
	# "options incompatible with --resume" -- confirm at call sites.
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file used to serialize parallel-fetch output.
	_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
84
	# Bundle of callbacks handed to build/merge tasks so they can talk
	# back to the scheduler (constructed in __init__ as _sched_iface).
	class _iface_class(SlotObject):
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "dblinkEmergeLog", "fetch",
			"output", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",
			"unregister")

	# Callbacks given to fetch tasks: the shared fetch log path and a
	# hook to enqueue a fetcher (see _schedule_fetch).
	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	# Container type for the scheduler's task queues; attribute per queue.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	# Boolean flags derived from command-line options in __init__
	# (slot name "fetch_all_uri" maps to option "--fetch-all-uri").
	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	# Progress counter: curval packages merged out of maxval.
	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	# Thin wrapper over emergelog() that drops xterm-title updates when
	# they are disabled, so they don't corrupt the status display.
	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	# Record of a failed package build/merge, kept for the final report.
	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
119
120         class _ConfigPool(object):
121                 """Interface for a task to temporarily allocate a config
122                 instance from a pool. This allows a task to be constructed
123                 long before the config instance actually becomes needed, like
124                 when prefetchers are constructed for the whole merge list."""
125                 __slots__ = ("_root", "_allocate", "_deallocate")
126                 def __init__(self, root, allocate, deallocate):
127                         self._root = root
128                         self._allocate = allocate
129                         self._deallocate = deallocate
130                 def allocate(self):
131                         return self._allocate(self._root)
132                 def deallocate(self, settings):
133                         self._deallocate(settings)
134
	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			# Delegate message storage to the base PortageException.
			portage.exception.PortageException.__init__(self, value)
142
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist=None, favorites=None, graph_config=None):
		"""
		Build the scheduler state: option flags, task queues, the
		callback interfaces handed to tasks, the dependency graph
		structures (via _init_graph), and parallel-fetch setup.

		@param settings: portage config for the target root
		@param trees: mapping of root -> tree dicts
		@param mtimedb: resume/mtime database
		@param myopts: parsed emerge command-line options
		@param spinner: progress spinner instance
		@param mergelist: deprecated; use graph_config instead
		@param favorites: atoms the user explicitly requested
		@param graph_config: depgraph output (graph, mergelist, caches)
		"""
		PollScheduler.__init__(self)

		if mergelist is not None:
			warnings.warn("The mergelist parameter of the " + \
				"_emerge.Scheduler constructor is now unused. Use " + \
				"the graph_config parameter instead.",
				DeprecationWarning, stacklevel=2)

		self.settings = settings
		self.target_root = settings["ROOT"]
		self.trees = trees
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites, allow_repo=True)
		# Derive boolean option flags: slot "fetch_all_uri" maps to the
		# command-line option "--fetch-all-uri", and so on.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self.curval = 0
		self._logger = self._emerge_log_class()
		# One SequentialTaskQueue per queue name (merge/jobs/fetch/unpack).
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		# --jobs defaults to 1 (serial builds) when unspecified.
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
			max_jobs = 1
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.pkgsettings = {}
		# Per-root pools of reusable config instances (see _ConfigPool).
		self._config_pool = {}
		for root in self.trees:
			self._config_pool[root] = []

		# Callback bundles that tasks use to talk back to the scheduler.
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, output=self._task_output,
			register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# WeakValueDictionary so finished prefetchers are reclaimed.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._running_tasks = set()
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		self._init_graph(graph_config)
		# Count only actual merge operations for the progress display.
		merge_count = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		# parallel-fetch requires distlocks and is pointless for a
		# single merge or fetch-only/pretend runs.
		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif merge_count > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
				# clear out existing fetch log if it exists
				# NOTE(review): the file handle from open() is never
				# closed explicitly; truncation relies on refcount GC
				# closing it promptly -- confirm intentional.
				try:
					open(self._fetch_log, 'w')
				except EnvironmentError:
					pass

		# Look up the installed portage package so upgrades of portage
		# itself can be recognized.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		if portage_match:
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
292
293         def _init_graph(self, graph_config):
294                 """
295                 Initialization structures used for dependency calculations
296                 involving currently installed packages.
297                 """
298                 # TODO: Replace the BlockerDB with a depgraph of installed packages
299                 # that's updated incrementally with each upgrade/uninstall operation
300                 # This will be useful for making quick and safe decisions with respect
301                 # to aggressive parallelization discussed in bug #279623.
302                 self._set_graph_config(graph_config)
303                 self._blocker_db = {}
304                 for root in self.trees:
305                         if graph_config is None:
306                                 fake_vartree = FakeVartree(self.trees[root]["root_config"])
307                         else:
308                                 fake_vartree = graph_config.trees[root]['vartree']
309                         self._blocker_db[root] = BlockerDB(fake_vartree)
310
311         def _destroy_graph(self):
312                 """
313                 Use this to free memory at the beginning of _calc_resume_list().
314                 After _calc_resume_list(), the _init_graph() method
315                 must to be called in order to re-generate the structures that
316                 this method destroys. 
317                 """
318                 self._blocker_db = None
319                 self._set_graph_config(None)
320                 gc.collect()
321
	def _poll(self, timeout=None):
		"""
		Poll for events while guaranteeing the status display is
		refreshed at least every _max_display_latency milliseconds,
		regardless of the caller's timeout.

		@param timeout: maximum wait in milliseconds, or None to block
			until at least one event arrives
		"""
		self._schedule()

		if timeout is None:
			# Block indefinitely, waking up periodically so the status
			# display stays current.
			while True:
				if not self._poll_event_handlers:
					self._schedule()
					if not self._poll_event_handlers:
						raise StopIteration(
							"timeout is None and there are no poll() event handlers")
				previous_count = len(self._poll_event_queue)
				PollScheduler._poll(self, timeout=self._max_display_latency)
				self._status_display.display()
				# A grown queue means a real event arrived; stop waiting.
				if previous_count != len(self._poll_event_queue):
					break

		elif timeout <= self._max_display_latency:
			# Short timeout: a single poll suffices.
			PollScheduler._poll(self, timeout=timeout)
			if timeout == 0:
				# The display is updated by _schedule() above, so it would be
				# redundant to update it here when timeout is 0.
				pass
			else:
				self._status_display.display()

		else:
			# Long timeout: poll in display-latency sized slices so the
			# display keeps refreshing while we wait.
			remaining_timeout = timeout
			start_time = time.time()
			while True:
				previous_count = len(self._poll_event_queue)
				PollScheduler._poll(self,
					timeout=min(self._max_display_latency, remaining_timeout))
				self._status_display.display()
				if previous_count != len(self._poll_event_queue):
					break
				elapsed_time = time.time() - start_time
				if elapsed_time < 0:
					# The system clock has changed such that start_time
					# is now in the future, so just assume that the
					# timeout has already elapsed.
					break
				# elapsed_time is seconds, timeout is milliseconds.
				remaining_timeout = timeout - 1000 * elapsed_time
				if remaining_timeout <= 0:
					break
367
	def _set_max_jobs(self, max_jobs):
		# Keep the jobs queue's concurrency limit in sync with our own
		# (max_jobs may be True, meaning unlimited, per _background_mode).
		self._max_jobs = max_jobs
		self._task_queues.jobs.max_jobs = max_jobs
371
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		Background mode is implied by parallel jobs or quiet output,
		but is vetoed by options like --pretend/--fetchonly and by the
		presence of interactive packages (which need stdio). Also
		updates the status display's quiet flag and the logger's
		xterm-titles flag as side effects.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts \
			or "--quiet-build" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		if background:
			interactive_tasks = self._get_interactive_tasks()
			if interactive_tasks:
				# Interactive packages need the terminal: fall back to
				# foreground and serial (--jobs=1) operation.
				background = False
				writemsg_level(">>> Sending package output to stdio due " + \
					"to interactive package(s):\n",
					level=logging.INFO, noiselevel=-1)
				msg = [""]
				for pkg in interactive_tasks:
					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
					if pkg.root != "/":
						pkg_str += " for " + pkg.root
					msg.append(pkg_str)
				msg.append("")
				writemsg_level("".join("%s\n" % (l,) for l in msg),
					level=logging.INFO, noiselevel=-1)
				if self._max_jobs is True or self._max_jobs > 1:
					self._set_max_jobs(1)
					writemsg_level(">>> Setting --jobs=1 due " + \
						"to the above interactive package(s)\n",
						level=logging.INFO, noiselevel=-1)
					writemsg_level(">>> In order to temporarily mask " + \
						"interactive updates, you may\n" + \
						">>> specify --accept-properties=-interactive\n",
						level=logging.INFO, noiselevel=-1)
		# The status display is only useful in background mode; --quiet
		# without --verbose also silences it.
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet

		return background
419
420         def _get_interactive_tasks(self):
421                 interactive_tasks = []
422                 for task in self._mergelist:
423                         if not (isinstance(task, Package) and \
424                                 task.operation == "merge"):
425                                 continue
426                         if 'interactive' in task.metadata.properties:
427                                 interactive_tasks.append(task)
428                 return interactive_tasks
429
	def _set_graph_config(self, graph_config):
		"""
		Install (or clear, when graph_config is None) the dependency
		graph state: the digraph, the merge list, and the derived
		structures (system deps, pruned roots, builddir-collision and
		implicit libc edges).
		"""
		if graph_config is None:
			# Reset to an empty state (used by _destroy_graph).
			self._graph_config = None
			self._digraph = None
			self._mergelist = []
			self._deep_system_deps.clear()
			return

		self._graph_config = graph_config
		self._digraph = graph_config.graph
		self._mergelist = graph_config.mergelist

		# The digraph is only needed for parallel scheduling decisions;
		# with --nodeps or a single job it would just waste memory.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# save some memory
			self._digraph = None
			graph_config.graph = None
			graph_config.pkg_cache.clear()
			self._deep_system_deps.clear()
			return

		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
		self._implicit_libc_deps()
		if '--debug' in self.myopts:
			writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
			self._digraph.debug_print()
			writemsg("\n", noiselevel=-1)
460
461         def _find_system_deps(self):
462                 """
463                 Find system packages and their deep runtime dependencies. Before being
464                 merged, these packages go to merge_wait_queue, to be merged when no
465                 other packages are building.
466                 NOTE: This can only find deep system deps if the system set has been
467                 added to the graph and traversed deeply (the depgraph "complete"
468                 parameter will do this, triggered by emerge --complete-graph option).
469                 """
470                 deep_system_deps = self._deep_system_deps
471                 deep_system_deps.clear()
472                 deep_system_deps.update(
473                         _find_deep_system_runtime_deps(self._digraph))
474                 deep_system_deps.difference_update([pkg for pkg in \
475                         deep_system_deps if pkg.operation != "merge"])
476
477         def _prune_digraph(self):
478                 """
479                 Prune any root nodes that are irrelevant.
480                 """
481
482                 graph = self._digraph
483                 completed_tasks = self._completed_tasks
484                 removed_nodes = set()
485                 while True:
486                         for node in graph.root_nodes():
487                                 if not isinstance(node, Package) or \
488                                         (node.installed and node.operation == "nomerge") or \
489                                         node.onlydeps or \
490                                         node in completed_tasks:
491                                         removed_nodes.add(node)
492                         if removed_nodes:
493                                 graph.difference_update(removed_nodes)
494                         if not removed_nodes:
495                                 break
496                         removed_nodes.clear()
497
498         def _prevent_builddir_collisions(self):
499                 """
500                 When building stages, sometimes the same exact cpv needs to be merged
501                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
502                 in the builddir. Currently, normal file locks would be inappropriate
503                 for this purpose since emerge holds all of it's build dir locks from
504                 the main process.
505                 """
506                 cpv_map = {}
507                 for pkg in self._mergelist:
508                         if not isinstance(pkg, Package):
509                                 # a satisfied blocker
510                                 continue
511                         if pkg.installed:
512                                 continue
513                         if pkg.cpv not in cpv_map:
514                                 cpv_map[pkg.cpv] = [pkg]
515                                 continue
516                         for earlier_pkg in cpv_map[pkg.cpv]:
517                                 self._digraph.add(earlier_pkg, pkg,
518                                         priority=DepPriority(buildtime=True))
519                         cpv_map[pkg.cpv].append(pkg)
520
521         def _implicit_libc_deps(self):
522                 """
523                 Create implicit dependencies on libc, in order to ensure that libc
524                 is installed as early as possible (see bug #303567). If the merge
525                 list contains both a new-style virtual and an old-style PROVIDE
526                 virtual, the new-style virtual is used.
527                 """
528                 implicit_libc_roots = set([self._running_root.root])
529                 libc_set = InternalPackageSet([LIBC_PACKAGE_ATOM])
530                 norm_libc_pkgs = {}
531                 virt_libc_pkgs = {}
532                 for pkg in self._mergelist:
533                         if not isinstance(pkg, Package):
534                                 # a satisfied blocker
535                                 continue
536                         if pkg.installed:
537                                 continue
538                         if pkg.root in implicit_libc_roots and \
539                                 pkg.operation == 'merge':
540                                 if libc_set.findAtomForPackage(pkg):
541                                         if pkg.category == 'virtual':
542                                                 d = virt_libc_pkgs
543                                         else:
544                                                 d = norm_libc_pkgs
545                                         if pkg.root in d:
546                                                 raise AssertionError(
547                                                         "found 2 libc matches: %s and %s" % \
548                                                         (d[pkg.root], pkg))
549                                         d[pkg.root] = pkg
550
551                 # Prefer new-style virtuals over old-style PROVIDE virtuals.
552                 libc_pkg_map = norm_libc_pkgs.copy()
553                 libc_pkg_map.update(virt_libc_pkgs)
554
555                 # Only add a dep when the version changes.
556                 for libc_pkg in list(libc_pkg_map.values()):
557                         if libc_pkg.root_config.trees['vartree'].dbapi.cpv_exists(
558                                 libc_pkg.cpv):
559                                 del libc_pkg_map[pkg.root]
560
561                 if not libc_pkg_map:
562                         return
563
564                 libc_pkgs = set(libc_pkg_map.values())
565                 earlier_libc_pkgs = set()
566
567                 for pkg in self._mergelist:
568                         if not isinstance(pkg, Package):
569                                 # a satisfied blocker
570                                 continue
571                         if pkg.installed:
572                                 continue
573                         if pkg.root in implicit_libc_roots and \
574                                 pkg.operation == 'merge':
575                                 if pkg in libc_pkgs:
576                                         earlier_libc_pkgs.add(pkg)
577                                 else:
578                                         my_libc = libc_pkg_map.get(pkg.root)
579                                         if my_libc is not None and \
580                                                 my_libc in earlier_libc_pkgs:
581                                                 self._digraph.add(my_libc, pkg,
582                                                         priority=DepPriority(buildtime=True))
583
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		# Default exit status; overridden by the first positional arg.
		status = 1
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			if pargs:
				# The first argument carries the failure status code.
				self.status = pargs[0]
594
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.

		@param fetcher: fetch task to enqueue; added at the front of
			the queue.
		"""
		self._task_queues.fetch.addFront(fetcher)
601
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.

		@param setup_phase: setup-phase task to enqueue; _schedule()
			is invoked immediately so the queue is serviced.
		"""
		self._task_queues.merge.add(setup_phase)
		self._schedule()
609
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.

		@param unpack_phase: unpack-phase task to enqueue.
		"""
		self._task_queues.unpack.add(unpack_phase)
616
617         def _find_blockers(self, new_pkg):
618                 """
619                 Returns a callable which should be called only when
620                 the vdb lock has been acquired.
621                 """
622                 def get_blockers():
623                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
624                 return get_blockers
625
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""
		Find installed packages that block new_pkg and return the
		corresponding dblink instances.

		@param new_pkg: the package about to be merged
		@param acquire_lock: forwarded to
			BlockerDB.findInstalledBlockers()
		@return: a list of dblink instances, or None when blockers
			are being ignored via command-line options
		"""
		if self._opts_ignore_blockers.intersection(self.myopts):
			return None

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		gc.collect()

		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# Skip packages in the same slot or of the same version
			# (presumably these are replaced by the merge itself and
			# therefore are not real blockers — confirm).
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
				continue
			if new_pkg.cpv == blocking_pkg.cpv:
				continue
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		gc.collect()

		return blocker_dblinks
652
653         def _dblink_pkg(self, pkg_dblink):
654                 cpv = pkg_dblink.mycpv
655                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
656                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
657                 installed = type_name == "installed"
658                 return self._pkg(cpv, type_name, root_config, installed=installed)
659
660         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
661
662                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
663                 out = StringIO()
664
665                 for msg in msgs:
666                         func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
667
668                 out_str = out.getvalue()
669
670                 self._task_output(out_str, log_path=log_path)
671
	def _dblink_emerge_log(self, msg):
		"""
		Forward a message to the scheduler's emerge logger
		(exposed as a callback, per the _dblink_* naming).

		@param msg: message string to log
		"""
		self._logger.log(msg)
674
675         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
676                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
677                 background = self._background
678
679                 if log_path is None:
680                         if not (background and level < logging.WARN):
681                                 portage.util.writemsg_level(msg,
682                                         level=level, noiselevel=noiselevel)
683                 else:
684                         self._task_output(msg, log_path=log_path)
685
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.

		@param pkg_dblink: the dblink performing the merge
		@param pkg_dbapi: unused here (part of the callback signature)
		@param ebuild_path: unused here (part of the callback signature)
		@param phase: name of the ebuild phase to execute
		@return: the phase's exit code
		"""

		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		# NOTE(review): 'pkg' and 'log_path' are never read below;
		# the _dblink_pkg() call may still matter for side effects
		# on the package cache — confirm before removing.
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		if phase in ('die_hooks', 'success_hooks'):
			# Hook phases run via MiscFunctionsProcess rather than
			# as a regular ebuild phase.
			ebuild_phase = MiscFunctionsProcess(background=background,
				commands=[phase], phase=phase,
				scheduler=scheduler, settings=settings)
		else:
			ebuild_phase = EbuildPhase(background=background,
				phase=phase, scheduler=scheduler,
				settings=settings)
		# Run synchronously from the caller's perspective.
		ebuild_phase.start()
		ebuild_phase.wait()

		return ebuild_phase.returncode
712
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must done before parallel
		tasks are started.

		@return: os.EX_OK on success, 1 if manifest generation failed
		"""

		# Nothing to do when only fetching.
		if '--fetchonly' in self.myopts:
			return os.EX_OK

		# Determine whether digest generation applies at all, either
		# via --digest or via FEATURES=digest in any root's settings.
		digest = '--digest' in self.myopts
		if not digest:
			for pkgsettings in self.pkgsettings.values():
				if pkgsettings.mycpv is not None:
					# ensure that we are using global features
					# settings rather than those from package.env
					pkgsettings.reset()
				if 'digest' in pkgsettings.features:
					digest = True
					break

		if not digest:
			return os.EX_OK

		for x in self._mergelist:
			# Only source ebuilds that will be merged need digests.
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
				continue
			pkgsettings = self.pkgsettings[x.root]
			if pkgsettings.mycpv is not None:
				# ensure that we are using global features
				# settings rather than those from package.env
				pkgsettings.reset()
			# Skip packages for which neither --digest nor
			# FEATURES=digest applies in this root.
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
				continue
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
			if ebuild_path is None:
				raise AssertionError("ebuild not found for '%s'" % x.cpv)
			# digestgen() locates the ebuild directory via $O.
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not digestgen(mysettings=pkgsettings, myportdb=portdb):
				writemsg_level(
					"!!! Unable to generate manifest for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return 1

		return os.EX_OK
762
763         def _env_sanity_check(self):
764                 """
765                 Verify a sane environment before trying to build anything from source.
766                 """
767                 have_src_pkg = False
768                 for x in self._mergelist:
769                         if isinstance(x, Package) and not x.built:
770                                 have_src_pkg = True
771                                 break
772
773                 if not have_src_pkg:
774                         return os.EX_OK
775
776                 for settings in self.pkgsettings.values():
777                         for var in ("ARCH", ):
778                                 value = settings.get(var)
779                                 if value and value.strip():
780                                         continue
781                                 msg = _("%(var)s is not set... "
782                                         "Are you missing the '%(configroot)setc/make.profile' symlink? "
783                                         "Is the symlink correct? "
784                                         "Is your portage tree complete?") % \
785                                         {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
786
787                                 out = portage.output.EOutput()
788                                 for line in textwrap.wrap(msg, 70):
789                                         out.eerror(line)
790                                 return 1
791
792                 return os.EX_OK
793
794         def _check_manifests(self):
795                 # Verify all the manifests now so that the user is notified of failure
796                 # as soon as possible.
797                 if "strict" not in self.settings.features or \
798                         "--fetchonly" in self.myopts or \
799                         "--fetch-all-uri" in self.myopts:
800                         return os.EX_OK
801
802                 shown_verifying_msg = False
803                 quiet_settings = {}
804                 for myroot, pkgsettings in self.pkgsettings.items():
805                         quiet_config = portage.config(clone=pkgsettings)
806                         quiet_config["PORTAGE_QUIET"] = "1"
807                         quiet_config.backup_changes("PORTAGE_QUIET")
808                         quiet_settings[myroot] = quiet_config
809                         del quiet_config
810
811                 failures = 0
812
813                 for x in self._mergelist:
814                         if not isinstance(x, Package) or \
815                                 x.type_name != "ebuild":
816                                 continue
817
818                         if x.operation == "uninstall":
819                                 continue
820
821                         if not shown_verifying_msg:
822                                 shown_verifying_msg = True
823                                 self._status_msg("Verifying ebuild manifests")
824
825                         root_config = x.root_config
826                         portdb = root_config.trees["porttree"].dbapi
827                         quiet_config = quiet_settings[root_config.root]
828                         ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
829                         if ebuild_path is None:
830                                 raise AssertionError("ebuild not found for '%s'" % x.cpv)
831                         quiet_config["O"] = os.path.dirname(ebuild_path)
832                         if not digestcheck([], quiet_config, strict=True):
833                                 failures |= 1
834
835                 if failures:
836                         return 1
837                 return os.EX_OK
838
839         def _add_prefetchers(self):
840
841                 if not self._parallel_fetch:
842                         return
843
844                 if self._parallel_fetch:
845                         self._status_msg("Starting parallel fetch")
846
847                         prefetchers = self._prefetchers
848                         getbinpkg = "--getbinpkg" in self.myopts
849
850                         # In order to avoid "waiting for lock" messages
851                         # at the beginning, which annoy users, never
852                         # spawn a prefetcher for the first package.
853                         for pkg in self._mergelist[1:]:
854                                 # mergelist can contain solved Blocker instances
855                                 if not isinstance(pkg, Package) or pkg.operation == "uninstall":
856                                         continue
857                                 prefetcher = self._create_prefetcher(pkg)
858                                 if prefetcher is not None:
859                                         self._task_queues.fetch.add(prefetcher)
860                                         prefetchers[pkg] = prefetcher
861
862         def _create_prefetcher(self, pkg):
863                 """
864                 @return: a prefetcher, or None if not applicable
865                 """
866                 prefetcher = None
867
868                 if not isinstance(pkg, Package):
869                         pass
870
871                 elif pkg.type_name == "ebuild":
872
873                         prefetcher = EbuildFetcher(background=True,
874                                 config_pool=self._ConfigPool(pkg.root,
875                                 self._allocate_config, self._deallocate_config),
876                                 fetchonly=1, logfile=self._fetch_log,
877                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
878
879                 elif pkg.type_name == "binary" and \
880                         "--getbinpkg" in self.myopts and \
881                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
882
883                         prefetcher = BinpkgPrefetcher(background=True,
884                                 pkg=pkg, scheduler=self._sched_iface)
885
886                 return prefetcher
887
888         def _is_restart_scheduled(self):
889                 """
890                 Check if the merge list contains a replacement
891                 for the current running instance, that will result
892                 in restart after merge.
893                 @rtype: bool
894                 @returns: True if a restart is scheduled, False otherwise.
895                 """
896                 if self._opts_no_restart.intersection(self.myopts):
897                         return False
898
899                 mergelist = self._mergelist
900
901                 for i, pkg in enumerate(mergelist):
902                         if self._is_restart_necessary(pkg) and \
903                                 i != len(mergelist) - 1:
904                                 return True
905
906                 return False
907
908         def _is_restart_necessary(self, pkg):
909                 """
910                 @return: True if merging the given package
911                         requires restart, False otherwise.
912                 """
913
914                 # Figure out if we need a restart.
915                 if pkg.root == self._running_root.root and \
916                         portage.match_from_list(
917                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
918                         if self._running_portage is None:
919                                 return True
920                         elif pkg.cpv != self._running_portage.cpv or \
921                                 '9999' in pkg.cpv or \
922                                 'git' in pkg.inherited:
923                                 return True
924                 return False
925
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.

		@param pkg: the package that has just been merged
		"""

		if self._opts_no_restart.intersection(self.myopts):
			return

		if not self._is_restart_necessary(pkg):
			return

		# No restart is needed when this was the last package.
		if pkg == self._mergelist[-1]:
			return

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Drop the already-merged package from the persisted resume
		# list and commit before handing control to the new instance.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		mtimedb.commit()
		portage.run_exitfuncs()
		# Don't trust sys.argv[0] here because eselect-python may modify it.
		emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
		mynewargv = [emerge_binary, "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.items():
			if myopt not in bad_resume_opts:
				if myarg is True:
					# Flag options carry no argument.
					mynewargv.append(myopt)
				elif isinstance(myarg, list):
					# arguments like --exclude that use 'append' action
					for x in myarg:
						mynewargv.append("%s=%s" % (myopt, x))
				else:
					mynewargv.append("%s=%s" % (myopt, myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
980
	def _run_pkg_pretend(self):
		"""
		Run the pkg_pretend phase for each package in the merge list
		that defines it (EAPI 4+ only), so that pre-merge checks are
		reported before any build starts.

		@return: os.EX_OK if all pretend phases succeed, 1 otherwise
		"""

		failures = 0

		for x in self._mergelist:
			if not isinstance(x, Package):
				continue

			if x.operation == "uninstall":
				continue

			# pkg_pretend does not exist before EAPI 4.
			if x.metadata["EAPI"] in ("0", "1", "2", "3"):
				continue

			if "pretend" not in x.metadata.defined_phases:
				continue

			out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
			portage.util.writemsg_stdout(out_str, noiselevel=-1)

			root_config = x.root_config
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(x)
			# Use a private temporary PORTAGE_TMPDIR so the pretend
			# phase cannot interfere with real build directories;
			# restored below.
			tmpdir = tempfile.mkdtemp()
			tmpdir_orig = settings["PORTAGE_TMPDIR"]
			settings["PORTAGE_TMPDIR"] = tmpdir

			if x.built:
				tree = "bintree"
				bintree = root_config.trees["bintree"].dbapi.bintree
				# Download the binary package first if it is remote.
				if bintree.isremote(x.cpv):
					fetcher = BinpkgPrefetcher(background=self._background,
						logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=x, scheduler=self._sched_iface)
					fetcher.start()
					fetcher.wait()

				# Extract the build-info (environment/metadata)
				# needed to execute the phase from the tbz2.
				tbz2_file = bintree.getname(x.cpv)
				infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
				os.makedirs(infloc)
				portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
				ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
				settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
				settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

			else:
				tree = "porttree"
				portdb = root_config.trees["porttree"].dbapi
				ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
				if ebuild_path is None:
					raise AssertionError("ebuild not found for '%s'" % x.cpv)
				settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
				if self._build_opts.buildpkgonly:
					settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
				else:
					settings.configdict["pkg"]["MERGE_TYPE"] = "source"

			portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
				"pretend", settings=settings,
				db=self.trees[settings["ROOT"]][tree].dbapi)
			prepare_build_dirs(root_config.root, settings, cleanup=0)

			# Expose the versions being replaced, as required for
			# pkg_pretend in EAPI 4.
			vardb = root_config.trees['vartree'].dbapi
			settings["REPLACING_VERSIONS"] = " ".join(
				set(portage.versions.cpv_getversion(match) \
					for match in vardb.match(x.slot_atom) + \
					vardb.match('='+x.cpv)))
			pretend_phase = EbuildPhase(background=self._background,
				phase="pretend", scheduler=self._sched_iface,
				settings=settings)

			pretend_phase.start()
			ret = pretend_phase.wait()

			portage.elog.elog_process(x.cpv, settings)

			# The temp dir is only removed on success (presumably
			# kept for inspection after a failure — confirm).
			if ret == os.EX_OK:
				shutil.rmtree(tmpdir)
			settings["PORTAGE_TMPDIR"] = tmpdir_orig

			if ret != os.EX_OK:
				failures += 1
		if failures:
			return 1
		return os.EX_OK
1065
	def merge(self):
		"""
		Top-level merge entry point: run pre-merge checks, merge the
		merge list (retrying with a recalculated list under
		--keep-going), then report failures.

		@return: os.EX_OK on success, nonzero on failure
		"""
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		try:
			self._background = self._background_mode()
		except self._unknown_internal_error:
			return 1

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
				"does not exist. Please create this " + \
				"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
				for l in msg:
					out.eerror(l)
				return 1

			if self._background:
				# Settings must be unlocked before they can be modified.
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		rval = self._generate_digests()
		if rval != os.EX_OK:
			return rval

		rval = self._env_sanity_check()
		if rval != os.EX_OK:
			return rval

		# TODO: Immediately recalculate deps here if --keep-going
		#       is enabled and corrupt manifests are detected.
		rval = self._check_manifests()
		if rval != os.EX_OK and not keep_going:
			return rval

		rval = self._run_pkg_pretend()
		if rval != os.EX_OK:
			return rval

		# Main merge loop: under --keep-going, failed packages are
		# removed from the resume list and the merge is retried with
		# a recalculated list.
		while True:
			rval = self._merge()
			if rval == os.EX_OK or fetchonly or not keep_going:
				break
			if "resume" not in mtimedb:
				break
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not mergelist:
				break

			if not failed_pkgs:
				break

			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))

			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

			if not mergelist:
				break

			if not self._calc_resume_list():
				break

			clear_caches(self.trees)
			if not self._mergelist:
				break

			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		if failed_pkgs:
			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

		printer = portage.output.EOutput()
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			# NOTE(review): 'build_dir' and 'log_paths' below appear
			# to be unused in this method — confirm before removing.
			build_dir = failed_pkg.build_dir
			log_file = None

			log_paths = [failed_pkg.build_log]

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				try:
					log_file = open(_unicode_encode(log_path,
						encoding=_encodings['fs'], errors='strict'), mode='rb')
				except IOError:
					pass
				else:
					# Transparently decompress gzipped build logs.
					if log_path.endswith('.gz'):
						log_file =  gzip.GzipFile(filename='',
							mode='rb', fileobj=log_file)

			if log_file is not None:
				try:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				except zlib.error as e:
					# A corrupt gzip log is reported rather than fatal.
					writemsg_level("%s\n" % (e,), level=logging.ERROR,
						noiselevel=-1)
				finally:
					log_file.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output =  _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			# Show collected die messages, grouped per package and
			# ordered by ebuild phase.
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				root_msg = ""
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				print()
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				print()
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						continue
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				msg()

		# Final failure summary, listing each failed package with
		# the path to its build log when one can be located.
		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and keep_going):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
			else:
				msg = "The following package has " + \
					"failed to build or install:"

			printer.eerror("")
			for line in textwrap.wrap(msg, 72):
				printer.eerror(line)
			printer.eerror("")
			for failed_pkg in self._failed_pkgs_all:
				# Use _unicode_decode() to force unicode format string so
				# that Package.__unicode__() is called in python2.
				msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
				log_path = self._locate_failure_log(failed_pkg)
				if log_path is not None:
					msg += ", Log file:"
				printer.eerror(msg)
				if log_path is not None:
					printer.eerror("  '%s'" % colorize('INFORM', log_path))
			printer.eerror("")

		if self._failed_pkgs_all:
			return 1
		return os.EX_OK
1266
1267         def _elog_listener(self, mysettings, key, logentries, fulltext):
1268                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
1269                 if errors:
1270                         self._failed_pkgs_die_msgs.append(
1271                                 (mysettings, key, errors))
1272
1273         def _locate_failure_log(self, failed_pkg):
1274
1275                 build_dir = failed_pkg.build_dir
1276                 log_file = None
1277
1278                 log_paths = [failed_pkg.build_log]
1279
1280                 for log_path in log_paths:
1281                         if not log_path:
1282                                 continue
1283
1284                         try:
1285                                 log_size = os.stat(log_path).st_size
1286                         except OSError:
1287                                 continue
1288
1289                         if log_size == 0:
1290                                 continue
1291
1292                         return log_path
1293
1294                 return None
1295
1296         def _add_packages(self):
1297                 pkg_queue = self._pkg_queue
1298                 for pkg in self._mergelist:
1299                         if isinstance(pkg, Package):
1300                                 pkg_queue.append(pkg)
1301                         elif isinstance(pkg, Blocker):
1302                                 pass
1303
1304         def _system_merge_started(self, merge):
1305                 """
1306                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1307                 """
1308                 graph = self._digraph
1309                 if graph is None:
1310                         return
1311                 pkg = merge.merge.pkg
1312
1313                 # Skip this if $ROOT != / since it shouldn't matter if there
1314                 # are unsatisfied system runtime deps in this case.
1315                 if pkg.root != '/':
1316                         return
1317
1318                 completed_tasks = self._completed_tasks
1319                 unsatisfied = self._unsatisfied_system_deps
1320
1321                 def ignore_non_runtime_or_satisfied(priority):
1322                         """
1323                         Ignore non-runtime and satisfied runtime priorities.
1324                         """
1325                         if isinstance(priority, DepPriority) and \
1326                                 not priority.satisfied and \
1327                                 (priority.runtime or priority.runtime_post):
1328                                 return False
1329                         return True
1330
1331                 # When checking for unsatisfied runtime deps, only check
1332                 # direct deps since indirect deps are checked when the
1333                 # corresponding parent is merged.
1334                 for child in graph.child_nodes(pkg,
1335                         ignore_priority=ignore_non_runtime_or_satisfied):
1336                         if not isinstance(child, Package) or \
1337                                 child.operation == 'uninstall':
1338                                 continue
1339                         if child is pkg:
1340                                 continue
1341                         if child.operation == 'merge' and \
1342                                 child not in completed_tasks:
1343                                 unsatisfied.add(child)
1344
	def _merge_wait_exit_handler(self, task):
		# Exit listener for merges that were deferred until no other
		# jobs were running (see the _merge_wait_queue handling in
		# _schedule_tasks): un-track the task, then run the normal
		# merge exit handling.
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
1348
1349         def _merge_exit(self, merge):
1350                 self._do_merge_exit(merge)
1351                 self._deallocate_config(merge.merge.settings)
1352                 if merge.returncode == os.EX_OK and \
1353                         not merge.merge.pkg.installed:
1354                         self._status_display.curval += 1
1355                 self._status_display.merges = len(self._task_queues.merge)
1356                 self._schedule()
1357
	def _do_merge_exit(self, merge):
		# Handle the result of a completed PackageMerge task: record a
		# failure, or mark the task complete and update the resume list
		# on success.
		pkg = merge.merge.pkg
		self._running_tasks.remove(pkg)
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			# Keep enough context (build dir/log) to show the user a
			# useful failure message later.
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=pkg,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)
			return

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		if pkg.installed:
			# Re-merges of already-installed packages don't appear in
			# the resume list and can't trigger a portage restart.
			return

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()
1397
	def _build_exit(self, build):
		# Exit listener for build jobs: on success, queue the merge
		# phase; on failure, record the failed package. Either way,
		# release the job slot and reschedule.
		if build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			self._running_tasks.remove(build.pkg)
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			# Preserve build dir/log paths so the failure summary can
			# point the user at the relevant log file.
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=build.pkg,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()
1429
	def _extract_exit(self, build):
		# Binary package extraction finishes the same way a regular
		# build does, so delegate to the common build exit handler.
		self._build_exit(build)
1432
	def _task_complete(self, pkg):
		# Record the task as complete, drop it from the unsatisfied
		# system-deps set, and let _choose_pkg() search again, since a
		# completed task may have unblocked queued packages.
		self._completed_tasks.add(pkg)
		self._unsatisfied_system_deps.discard(pkg)
		self._choose_pkg_return_early = False
1437
1438         def _merge(self):
1439
1440                 self._add_prefetchers()
1441                 self._add_packages()
1442                 pkg_queue = self._pkg_queue
1443                 failed_pkgs = self._failed_pkgs
1444                 portage.locks._quiet = self._background
1445                 portage.elog.add_listener(self._elog_listener)
1446                 rval = os.EX_OK
1447
1448                 try:
1449                         self._main_loop()
1450                 finally:
1451                         self._main_loop_cleanup()
1452                         portage.locks._quiet = False
1453                         portage.elog.remove_listener(self._elog_listener)
1454                         if failed_pkgs:
1455                                 rval = failed_pkgs[-1].returncode
1456
1457                 return rval
1458
1459         def _main_loop_cleanup(self):
1460                 del self._pkg_queue[:]
1461                 self._completed_tasks.clear()
1462                 self._deep_system_deps.clear()
1463                 self._unsatisfied_system_deps.clear()
1464                 self._choose_pkg_return_early = False
1465                 self._status_display.reset()
1466                 self._digraph = None
1467                 self._task_queues.fetch.clear()
1468
1469         def _choose_pkg(self):
1470                 """
1471                 Choose a task that has all it's dependencies satisfied.
1472                 """
1473
1474                 if self._choose_pkg_return_early:
1475                         return None
1476
1477                 if self._digraph is None:
1478                         if self._is_work_scheduled() and \
1479                                 not ("--nodeps" in self.myopts and \
1480                                 (self._max_jobs is True or self._max_jobs > 1)):
1481                                 self._choose_pkg_return_early = True
1482                                 return None
1483                         return self._pkg_queue.pop(0)
1484
1485                 if not self._is_work_scheduled():
1486                         return self._pkg_queue.pop(0)
1487
1488                 self._prune_digraph()
1489
1490                 chosen_pkg = None
1491
1492                 # Prefer uninstall operations when available.
1493                 graph = self._digraph
1494                 for pkg in self._pkg_queue:
1495                         if pkg.operation == 'uninstall' and \
1496                                 not graph.child_nodes(pkg):
1497                                 chosen_pkg = pkg
1498                                 break
1499
1500                 if chosen_pkg is None:
1501                         later = set(self._pkg_queue)
1502                         for pkg in self._pkg_queue:
1503                                 later.remove(pkg)
1504                                 if not self._dependent_on_scheduled_merges(pkg, later):
1505                                         chosen_pkg = pkg
1506                                         break
1507
1508                 if chosen_pkg is not None:
1509                         self._pkg_queue.remove(chosen_pkg)
1510
1511                 if chosen_pkg is None:
1512                         # There's no point in searching for a package to
1513                         # choose until at least one of the existing jobs
1514                         # completes.
1515                         self._choose_pkg_return_early = True
1516
1517                 return chosen_pkg
1518
1519         def _dependent_on_scheduled_merges(self, pkg, later):
1520                 """
1521                 Traverse the subgraph of the given packages deep dependencies
1522                 to see if it contains any scheduled merges.
1523                 @param pkg: a package to check dependencies for
1524                 @type pkg: Package
1525                 @param later: packages for which dependence should be ignored
1526                         since they will be merged later than pkg anyway and therefore
1527                         delaying the merge of pkg will not result in a more optimal
1528                         merge order
1529                 @type later: set
1530                 @rtype: bool
1531                 @returns: True if the package is dependent, False otherwise.
1532                 """
1533
1534                 graph = self._digraph
1535                 completed_tasks = self._completed_tasks
1536
1537                 dependent = False
1538                 traversed_nodes = set([pkg])
1539                 direct_deps = graph.child_nodes(pkg)
1540                 node_stack = direct_deps
1541                 direct_deps = frozenset(direct_deps)
1542                 while node_stack:
1543                         node = node_stack.pop()
1544                         if node in traversed_nodes:
1545                                 continue
1546                         traversed_nodes.add(node)
1547                         if not ((node.installed and node.operation == "nomerge") or \
1548                                 (node.operation == "uninstall" and \
1549                                 node not in direct_deps) or \
1550                                 node in completed_tasks or \
1551                                 node in later):
1552                                 dependent = True
1553                                 break
1554
1555                         # Don't traverse children of uninstall nodes since
1556                         # those aren't dependencies in the usual sense.
1557                         if node.operation != "uninstall":
1558                                 node_stack.extend(graph.child_nodes(node))
1559
1560                 return dependent
1561
1562         def _allocate_config(self, root):
1563                 """
1564                 Allocate a unique config instance for a task in order
1565                 to prevent interference between parallel tasks.
1566                 """
1567                 if self._config_pool[root]:
1568                         temp_settings = self._config_pool[root].pop()
1569                 else:
1570                         temp_settings = portage.config(clone=self.pkgsettings[root])
1571                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1572                 # performance reasons, call it here to make sure all settings from the
1573                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1574                 temp_settings.reload()
1575                 temp_settings.reset()
1576                 return temp_settings
1577
	def _deallocate_config(self, settings):
		# Return a config instance to the per-root pool so that
		# _allocate_config() can reuse it instead of cloning a new one.
		self._config_pool[settings["ROOT"]].append(settings)
1580
1581         def _main_loop(self):
1582
1583                 # Only allow 1 job max if a restart is scheduled
1584                 # due to portage update.
1585                 if self._is_restart_scheduled() or \
1586                         self._opts_no_background.intersection(self.myopts):
1587                         self._set_max_jobs(1)
1588
1589                 while self._schedule():
1590                         if self._poll_event_handlers:
1591                                 self._poll_loop()
1592
1593                 while True:
1594                         self._schedule()
1595                         if not self._is_work_scheduled():
1596                                 break
1597                         if self._poll_event_handlers:
1598                                 self._poll_loop()
1599
1600         def _keep_scheduling(self):
1601                 return bool(self._pkg_queue and \
1602                         not (self._failed_pkgs and not self._build_opts.fetchonly))
1603
	def _is_work_scheduled(self):
		# True while any build/merge tasks are still running.
		return bool(self._running_tasks)
1606
	def _schedule_tasks(self):
		# Top-level scheduling driver: flush deferred merges when no
		# jobs are running, start new jobs, and pump each task queue
		# until the overall state stops changing (fixed point).
		while True:

			# When the number of jobs drops to zero, process all waiting merges.
			if not self._jobs and self._merge_wait_queue:
				for task in self._merge_wait_queue:
					task.addExitListener(self._merge_wait_exit_handler)
					self._task_queues.merge.add(task)
				self._status_display.merges = len(self._task_queues.merge)
				self._merge_wait_scheduled.extend(self._merge_wait_queue)
				del self._merge_wait_queue[:]

			self._schedule_tasks_imp()
			self._status_display.display()

			# Count how many queues made progress this iteration.
			state_change = 0
			for q in self._task_queues.values():
				if q.schedule():
					state_change += 1

			# Cancel prefetchers if they're the only reason
			# the main poll loop is still running.
			if self._failed_pkgs and not self._build_opts.fetchonly and \
				not self._is_work_scheduled() and \
				self._task_queues.fetch:
				self._task_queues.fetch.clear()
				state_change += 1

			# Loop again if anything changed, or if deferred merges
			# just became eligible (jobs dropped to zero above).
			if not (state_change or \
				(not self._jobs and self._merge_wait_queue)):
				break

		return self._keep_scheduling()
1641
1642         def _job_delay(self):
1643                 """
1644                 @rtype: bool
1645                 @returns: True if job scheduling should be delayed, False otherwise.
1646                 """
1647
1648                 if self._jobs and self._max_load is not None:
1649
1650                         current_time = time.time()
1651
1652                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1653                         if delay > self._job_delay_max:
1654                                 delay = self._job_delay_max
1655                         if (current_time - self._previous_job_start_time) < delay:
1656                                 return True
1657
1658                 return False
1659
1660         def _schedule_tasks_imp(self):
1661                 """
1662                 @rtype: bool
1663                 @returns: True if state changed, False otherwise.
1664                 """
1665
1666                 state_change = 0
1667
1668                 while True:
1669
1670                         if not self._keep_scheduling():
1671                                 return bool(state_change)
1672
1673                         if self._choose_pkg_return_early or \
1674                                 self._merge_wait_scheduled or \
1675                                 (self._jobs and self._unsatisfied_system_deps) or \
1676                                 not self._can_add_job() or \
1677                                 self._job_delay():
1678                                 return bool(state_change)
1679
1680                         pkg = self._choose_pkg()
1681                         if pkg is None:
1682                                 return bool(state_change)
1683
1684                         state_change += 1
1685
1686                         if not pkg.installed:
1687                                 self._pkg_count.curval += 1
1688
1689                         task = self._task(pkg)
1690                         self._running_tasks.add(pkg)
1691
1692                         if pkg.installed:
1693                                 merge = PackageMerge(merge=task)
1694                                 merge.addExitListener(self._merge_exit)
1695                                 self._task_queues.merge.addFront(merge)
1696
1697                         elif pkg.built:
1698                                 self._jobs += 1
1699                                 self._previous_job_start_time = time.time()
1700                                 self._status_display.running = self._jobs
1701                                 task.addExitListener(self._extract_exit)
1702                                 self._task_queues.jobs.add(task)
1703
1704                         else:
1705                                 self._jobs += 1
1706                                 self._previous_job_start_time = time.time()
1707                                 self._status_display.running = self._jobs
1708                                 task.addExitListener(self._build_exit)
1709                                 self._task_queues.jobs.add(task)
1710
1711                 return bool(state_change)
1712
1713         def _task(self, pkg):
1714
1715                 pkg_to_replace = None
1716                 if pkg.operation != "uninstall":
1717                         vardb = pkg.root_config.trees["vartree"].dbapi
1718                         previous_cpv = vardb.match(pkg.slot_atom)
1719                         if previous_cpv:
1720                                 previous_cpv = previous_cpv.pop()
1721                                 pkg_to_replace = self._pkg(previous_cpv,
1722                                         "installed", pkg.root_config, installed=True)
1723
1724                 task = MergeListItem(args_set=self._args_set,
1725                         background=self._background, binpkg_opts=self._binpkg_opts,
1726                         build_opts=self._build_opts,
1727                         config_pool=self._ConfigPool(pkg.root,
1728                         self._allocate_config, self._deallocate_config),
1729                         emerge_opts=self.myopts,
1730                         find_blockers=self._find_blockers(pkg), logger=self._logger,
1731                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
1732                         pkg_to_replace=pkg_to_replace,
1733                         prefetcher=self._prefetchers.get(pkg),
1734                         scheduler=self._sched_iface,
1735                         settings=self._allocate_config(pkg.root),
1736                         statusMessage=self._status_msg,
1737                         world_atom=self._world_atom)
1738
1739                 return task
1740
1741         def _failed_pkg_msg(self, failed_pkg, action, preposition):
1742                 pkg = failed_pkg.pkg
1743                 msg = "%s to %s %s" % \
1744                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
1745                 if pkg.root != "/":
1746                         msg += " %s %s" % (preposition, pkg.root)
1747
1748                 log_path = self._locate_failure_log(failed_pkg)
1749                 if log_path is not None:
1750                         msg += ", Log file:"
1751                 self._status_msg(msg)
1752
1753                 if log_path is not None:
1754                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1755
1756         def _status_msg(self, msg):
1757                 """
1758                 Display a brief status message (no newlines) in the status display.
1759                 This is called by tasks to provide feedback to the user. This
1760                 delegates the resposibility of generating \r and \n control characters,
1761                 to guarantee that lines are created or erased when necessary and
1762                 appropriate.
1763
1764                 @type msg: str
1765                 @param msg: a brief status message (no newlines allowed)
1766                 """
1767                 if not self._background:
1768                         writemsg_level("\n")
1769                 self._status_display.displayMessage(msg)
1770
1771         def _save_resume_list(self):
1772                 """
1773                 Do this before verifying the ebuild Manifests since it might
1774                 be possible for the user to use --resume --skipfirst get past
1775                 a non-essential package with a broken digest.
1776                 """
1777                 mtimedb = self._mtimedb
1778
1779                 mtimedb["resume"] = {}
1780                 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1781                 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1782                 # a list type for options.
1783                 mtimedb["resume"]["myopts"] = self.myopts.copy()
1784
1785                 # Convert Atom instances to plain str.
1786                 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
1787                 mtimedb["resume"]["mergelist"] = [list(x) \
1788                         for x in self._mergelist \
1789                         if isinstance(x, Package) and x.operation == "merge"]
1790
1791                 mtimedb.commit()
1792
	def _calc_resume_list(self):
		"""
		Use the current resume list to calculate a new one,
		dropping any packages with unsatisfied deps.
		@rtype: bool
		@returns: True if successful, False otherwise.
		"""
		print(colorize("GOOD", "*** Resuming merge..."))

		# free some memory before creating
		# the resume depgraph
		self._destroy_graph()

		myparams = create_depgraph_params(self.myopts, None)
		success = False
		e = None
		try:
			success, mydepgraph, dropped_tasks = resume_depgraph(
				self.settings, self.trees, self._mtimedb, self.myopts,
				myparams, self._spinner)
		except depgraph.UnsatisfiedResumeDep as exc:
			# rename variable to avoid python-3.0 error:
			# SyntaxError: can not delete variable 'e' referenced in nested
			#              scope
			e = exc
			mydepgraph = e.depgraph
			dropped_tasks = set()

		if e is not None:
			# Defer the error display via _post_mod_echo_msgs so that
			# it isn't swept away by mod_echo output (the closure
			# captures e and mydepgraph from this scope).
			def unsatisfied_resume_dep_msg():
				mydepgraph.display_problems()
				out = portage.output.EOutput()
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")
				out.eerror("")
				indent = "  "
				show_parents = set()
				for dep in e.value:
					if dep.parent in show_parents:
						continue
					show_parents.add(dep.parent)
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
					else:
						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror("")
				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in textwrap.wrap(msg, 72):
					out.eerror(line)
			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
			return False

		if success and self._show_list():
			mylist = mydepgraph.altlist()
			if mylist:
				if "--tree" in self.myopts:
					mylist.reverse()
				mydepgraph.display(mylist, favorites=self._favorites)

		if not success:
			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
			return False
		mydepgraph.display_problems()
		self._init_graph(mydepgraph.schedulerGraph())

		# Report packages dropped from the resume list because their
		# dependencies could not be satisfied (--keep-going behavior).
		msg_width = 75
		for task in dropped_tasks:
			if not (isinstance(task, Package) and task.operation == "merge"):
				continue
			pkg = task
			msg = "emerge --keep-going:" + \
				" %s" % (pkg.cpv,)
			if pkg.root != "/":
				msg += " for %s" % (pkg.root,)
			msg += " dropped due to unsatisfied dependency."
			for line in textwrap.wrap(msg, msg_width):
				eerror(line, phase="other", key=pkg.cpv)
			settings = self.pkgsettings[pkg.root]
			# Ensure that log collection from $T is disabled inside
			# elog_process(), since any logs that might exist are
			# not valid here.
			settings.pop("T", None)
			portage.elog.elog_process(pkg.cpv, settings)
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

		return True
1889
1890         def _show_list(self):
1891                 myopts = self.myopts
1892                 if "--quiet" not in myopts and \
1893                         ("--ask" in myopts or "--tree" in myopts or \
1894                         "--verbose" in myopts):
1895                         return True
1896                 return False
1897
	def _world_atom(self, pkg):
		"""
		Add or remove the package to the world file, but only if
		it's supposed to be added or removed. Otherwise, do nothing.

		@param pkg: the package that was just merged or unmerged
		@type pkg: Package
		"""

		# None of these modes perform a permanent merge into the target
		# root, so the world file must not be modified.
		if set(("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			return

		# Only packages merged into the target root belong in its
		# world file.
		if pkg.root != self.target_root:
			return

		# Only packages that the user explicitly asked for (matched by
		# a command-line argument atom) are recorded in world.
		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			return

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["selected"]
		# The world set is duck-typed: file-backed implementations
		# support lock/load/add/remove, while others may not, hence
		# the hasattr checks below.
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		try:
			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			if pkg.operation == "uninstall":
				if hasattr(world_set, "cleanPackage"):
					world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
							pkg.cpv)
				# Also drop any now-inactive nested sets that referenced
				# this package.
				if hasattr(world_set, "remove"):
					for s in pkg.root_config.setconfig.active:
						world_set.remove(SETPREFIX+s)
			else:
				# create_world_atom() returns None when the package
				# should not be recorded (e.g. covered by system set).
				atom = create_world_atom(pkg, args_set, root_config)
				if atom:
					if hasattr(world_set, "add"):
						self._status_msg(('Recording %s in "world" ' + \
							'favorites file...') % atom)
						logger.log(" === (%s of %s) Updating world file (%s)" % \
							(pkg_count.curval, pkg_count.maxval, pkg.cpv))
						world_set.add(atom)
					else:
						writemsg_level('\n!!! Unable to record %s in "world"\n' % \
							(atom,), level=logging.WARN, noiselevel=-1)
		finally:
			# Always release the lock, even if an update above raised.
			if world_locked:
				world_set.unlock()
1952
1953         def _pkg(self, cpv, type_name, root_config, installed=False):
1954                 """
1955                 Get a package instance from the cache, or create a new
1956                 one if necessary. Raises KeyError from aux_get if it
1957                 failures for some reason (package does not exist or is
1958                 corrupt).
1959                 """
1960                 operation = "merge"
1961                 if installed:
1962                         operation = "nomerge"
1963
1964                 if self._digraph is not None:
1965                         # Reuse existing instance when available.
1966                         pkg = self._digraph.get(
1967                                 (type_name, root_config.root, cpv, operation))
1968                         if pkg is not None:
1969                                 return pkg
1970
1971                 tree_type = depgraph.pkg_tree_map[type_name]
1972                 db = root_config.trees[tree_type].dbapi
1973                 db_keys = list(self.trees[root_config.root][
1974                         tree_type].dbapi._aux_cache_keys)
1975                 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
1976                 return Package(built=(type_name != 'ebuild'),
1977                         cpv=cpv, metadata=metadata,
1978                         root_config=root_config, installed=installed)