1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
18 from portage import StringIO
19 from portage import os
20 from portage import _encodings
21 from portage import _unicode_encode
22 from portage.cache.mappings import slot_dict_class
23 from portage.const import LIBC_PACKAGE_ATOM
24 from portage.elog.messages import eerror
25 from portage.localization import _
26 from portage.output import colorize, create_color_func, red
27 bad = create_color_func("BAD")
28 from portage._sets import SETPREFIX
29 from portage._sets.base import InternalPackageSet
30 from portage.util import writemsg, writemsg_level
31 from portage.package.ebuild.digestcheck import digestcheck
32 from portage.package.ebuild.digestgen import digestgen
33 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
35 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
36 from _emerge.Blocker import Blocker
37 from _emerge.BlockerDB import BlockerDB
38 from _emerge.clear_caches import clear_caches
39 from _emerge.create_depgraph_params import create_depgraph_params
40 from _emerge.create_world_atom import create_world_atom
41 from _emerge.DepPriority import DepPriority
42 from _emerge.depgraph import depgraph, resume_depgraph
43 from _emerge.EbuildFetcher import EbuildFetcher
44 from _emerge.EbuildPhase import EbuildPhase
45 from _emerge.emergelog import emergelog, _emerge_log_dir
46 from _emerge.FakeVartree import FakeVartree
47 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
48 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
49 from _emerge.JobStatusDisplay import JobStatusDisplay
50 from _emerge.MergeListItem import MergeListItem
51 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
52 from _emerge.Package import Package
53 from _emerge.PackageMerge import PackageMerge
54 from _emerge.PollScheduler import PollScheduler
55 from _emerge.RootConfig import RootConfig
56 from _emerge.SlotObject import SlotObject
57 from _emerge.SequentialTaskQueue import SequentialTaskQueue
# Python 3 removed the 'basestring' type. Alias it to str so that
# isinstance(..., basestring) checks elsewhere in this module keep
# working on both Python 2 and Python 3.
if sys.hexversion >= 0x3000000:
    basestring = str
62 class Scheduler(PollScheduler):
# Option sets that alter scheduler behavior; each entry is an emerge
# command-line flag.

# With any of these options, installed-package blockers are ignored.
_opts_ignore_blockers = \
    frozenset(["--buildpkgonly",
    "--fetchonly", "--fetch-all-uri",
    "--nodeps", "--pretend"])

# These options are incompatible with background (quiet) mode.
_opts_no_background = \
    frozenset(["--pretend",
    "--fetchonly", "--fetch-all-uri"])

# With these options, emerge never re-execs itself after a portage upgrade.
_opts_no_restart = frozenset(["--buildpkgonly",
    "--fetchonly", "--fetch-all-uri", "--pretend"])

# Options that must not leak into an automatic --resume invocation.
_bad_resume_opts = set(["--ask", "--changelog",
    "--resume", "--skipfirst"])

# Shared log file for all parallel-fetch output.
_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
class _iface_class(SlotObject):
    """Callback interface handed to tasks (dblink hooks, fetching,
    and scheduling entry points).

    The slot list matches the keyword arguments passed when this
    class is instantiated in __init__ (including 'unregister',
    restoring the tuple's missing terminator).
    """
    __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
        "dblinkElog", "dblinkEmergeLog", "fetch",
        "output", "register", "schedule",
        "scheduleSetup", "scheduleUnpack", "scheduleYield",
        "unregister")
class _fetch_iface_class(SlotObject):
    """Interface handed to fetch tasks: the shared fetch log path and
    a callable that schedules a fetcher on the serialized fetch queue."""
    __slots__ = ("log_file", "schedule")
# Slot-restricted mapping type holding one named task queue per
# category (merge, jobs, fetch, unpack); populated in __init__ with
# SequentialTaskQueue instances.
_task_queues_class = slot_dict_class(
    ("merge", "jobs", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
    """Boolean build-related emerge options; each slot is set in
    __init__ from the presence of the corresponding --flag in myopts."""
    __slots__ = ("buildpkg", "buildpkgonly",
        "fetch_all_uri", "fetchonly", "pretend")
class _binpkg_opts_class(SlotObject):
    """Boolean binary-package emerge options, derived from myopts."""
    __slots__ = ("fetchonly", "getbinpkg", "pretend")
class _pkg_count_class(SlotObject):
    """Merge progress counter: curval packages merged out of maxval."""
    __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
    """Thin wrapper around emergelog() that knows whether xterm
    titles (and thus short status messages) are enabled."""
    __slots__ = ("xterm_titles",)

    def log(self, *pargs, **kwargs):
        """Forward a log message, dropping short_msg when titles are off."""
        if self.xterm_titles:
            emergelog(self.xterm_titles, *pargs, **kwargs)
        else:
            # Avoid interference with the scheduler's status display.
            kwargs.pop("short_msg", None)
            emergelog(self.xterm_titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
    """Record of a failed package: its build dir, build log path,
    the Package instance, and the failing returncode."""
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")
class _ConfigPool(object):
    """Interface for a task to temporarily allocate a config
    instance from a pool. This allows a task to be constructed
    long before the config instance actually becomes needed, like
    when prefetchers are constructed for the whole merge list."""
    __slots__ = ("_root", "_allocate", "_deallocate")

    def __init__(self, root, allocate, deallocate):
        # Fix: store the root and restore the allocate() method header;
        # the visible code had an orphan return statement and never
        # saved 'root', leaving self._root undefined.
        self._root = root
        self._allocate = allocate
        self._deallocate = deallocate

    def allocate(self):
        """Borrow a config instance for this pool's root."""
        return self._allocate(self._root)

    def deallocate(self, settings):
        """Return a previously allocated config instance to the pool."""
        self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist, favorites, digraph):
    """Initialize scheduler state from the calculated merge list.

    Sets up option flags, task queues, wait queues for system
    packages, the status display, per-root config pools, the
    scheduler interface objects, failure bookkeeping, job-delay
    parameters, and parallel-fetch state.
    """
    PollScheduler.__init__(self)
    self.settings = settings
    self.target_root = settings["ROOT"]
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._mergelist = mergelist
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites)
    # Derive boolean option objects from the presence of the
    # corresponding --flags in myopts (underscores map to dashes).
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
    self._logger = self._emerge_log_class()
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())

    # Holds merges that will wait to be executed when no builds are
    # executing. This is useful for system packages since dependencies
    # on system packages are frequently unspecified.
    self._merge_wait_queue = []
    # Holds merges that have been transfered from the merge_wait_queue to
    # the actual merge queue. They are removed from this list upon
    # completion. Other packages can start building only when this list is
    self._merge_wait_scheduled = []

    # Holds system packages and their deep runtime dependencies. Before
    # being merged, these packages go to merge_wait_queue, to be merged
    # when no other packages are building.
    self._deep_system_deps = set()

    # Holds packages to merge which will satisfy currently unsatisfied
    # deep runtime dependencies of system packages. If this is not empty
    # then no parallel builds will be spawned until it is empty. This
    # minimizes the possibility that a build will fail due to the system
    # being in a fragile state. For example, see bug #259954.
    self._unsatisfied_system_deps = set()

    self._status_display = JobStatusDisplay(
        xterm_titles=('notitles' not in settings.features))
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    self._set_max_jobs(max_jobs)

    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]
    if settings.get("PORTAGE_DEBUG", "") == "1":
    self.pkgsettings = {}
    self._config_pool = {}
    for root in self.trees:
        self._config_pool[root] = []
    self._init_installed_graph()
    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    # Bundle the scheduler callbacks into the interface object that
    # tasks receive.
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        dblinkEmergeLog=self._dblink_emerge_log,
        fetch=fetch_iface, output=self._task_output,
        register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)

    # Weak references so finished prefetchers can be collected.
    self._prefetchers = weakref.WeakValueDictionary()
    self._running_tasks = set()
    self._completed_tasks = set()

    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    merge_count = len([x for x in mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval

    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None

    self._set_digraph(digraph)

    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    self._choose_pkg_return_early = False

    # parallel-fetch requires distlocks and is pointless for a
    # single-package merge or fetch-only operation.
    features = self.settings.features
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif merge_count > 1:
            self._parallel_fetch = True

    if self._parallel_fetch:
        # clear out existing fetch log if it exists
        open(self._fetch_log, 'w')
        except EnvironmentError:

    # Identify the installed portage instance so a self-upgrade can
    # trigger a restart later.
    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
    cpv = portage_match.pop()
    self._running_portage = self._pkg(cpv, "installed",
        self._running_root, installed=True)
def _init_installed_graph(self):
    """
    Initialization structures used for dependency calculations
    involving currently installed packages.
    """
    # TODO: Replace the BlockerDB with a depgraph of installed packages
    # that's updated incrementally with each upgrade/uninstall operation
    # This will be useful for making quick and safe decisions with respect
    # to aggressive parallelization discussed in bug #279623.
    self._blocker_db = {}
    for root in self.trees:
        self._blocker_db[root] = \
            BlockerDB(FakeVartree(self.trees[root]["root_config"]))
def _destroy_installed_graph(self):
    """
    Use this to free memory before calling _calc_resume_list().
    After _calc_resume_list(), the _init_installed_graph() and
    _set_digraph() methods need to be called in order to
    re-generate the structures that this method destroys.
    """
    self._blocker_db = None
    self._set_digraph(None)
def _poll(self, timeout=None):
    """Poll for events, delegating to the PollScheduler base class."""
    PollScheduler._poll(self, timeout=timeout)
def _set_max_jobs(self, max_jobs):
    """Set the parallel job limit and propagate it to the jobs queue."""
    self._max_jobs = max_jobs
    self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @returns: True if background mode is enabled, False otherwise.
    """
    # Background mode applies with multiple jobs or quiet output, but
    # never with pretend/fetch-only style options.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts \
        or "--quiet-build" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    # Interactive packages need a terminal, so force --jobs=1 and
    # route output to stdio.
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            pkg_str += " for " + pkg.root
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)
            writemsg_level(">>> In order to temporarily mask " + \
                "interactive updates, you may\n" + \
                ">>> specify --accept-properties=-interactive\n",
                level=logging.INFO, noiselevel=-1)
    self._status_display.quiet = \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
def _get_interactive_tasks(self):
    """Return the merge-list Packages whose PROPERTIES include
    'interactive'.

    Fix: the filter branch had no body; restore the 'continue' so
    non-Package entries and non-merge operations are skipped instead
    of falling through with an empty suite.
    """
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
            continue
        if 'interactive' in task.metadata.properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    """Install the scheduler digraph and derive helper structures
    (system deps, pruning, builddir collision edges, libc deps)."""
    # With --nodeps or fewer than 2 jobs the graph is not needed.
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
    self._digraph = digraph
    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
    self._implicit_libc_deps()
    if '--debug' in self.myopts:
        writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
        self._digraph.debug_print()
        writemsg("\n", noiselevel=-1)
def _find_system_deps(self):
    """
    Find system packages and their deep runtime dependencies. Before being
    merged, these packages go to merge_wait_queue, to be merged when no
    other packages are building.
    NOTE: This can only find deep system deps if the system set has been
    added to the graph and traversed deeply (the depgraph "complete"
    parameter will do this, triggered by emerge --complete-graph option).
    """
    deep_system_deps = self._deep_system_deps
    deep_system_deps.clear()
    deep_system_deps.update(
        _find_deep_system_runtime_deps(self._digraph))
    # Only packages being merged are relevant here.
    deep_system_deps.difference_update([pkg for pkg in \
        deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # Root nodes that are not packages to merge, or already done,
    # can be dropped from the graph.
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    if not removed_nodes:
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    """
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        # Serialize same-cpv merges with buildtime edges.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
def _implicit_libc_deps(self):
    """
    Create implicit dependencies on libc, in order to ensure that libc
    is installed as early as possible (see bug #303567). If the merge
    list contains both a new-style virtual and an old-style PROVIDE
    virtual, the new-style virtual is used.
    """
    implicit_libc_roots = set([self._running_root.root])
    libc_set = InternalPackageSet([LIBC_PACKAGE_ATOM])
    # First pass: find libc providers scheduled for merge.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.root in implicit_libc_roots and \
            pkg.operation == 'merge':
            if libc_set.findAtomForPackage(pkg):
                if pkg.category == 'virtual':
                    raise AssertionError(
                        "found 2 libc matches: %s and %s" % \

    # Prefer new-style virtuals over old-style PROVIDE virtuals.
    libc_pkg_map = norm_libc_pkgs.copy()
    libc_pkg_map.update(virt_libc_pkgs)

    # Only add a dep when the version changes.
    for libc_pkg in list(libc_pkg_map.values()):
        if libc_pkg.root_config.trees['vartree'].dbapi.cpv_exists(
            del libc_pkg_map[pkg.root]

    libc_pkgs = set(libc_pkg_map.values())
    earlier_libc_pkgs = set()

    # Second pass: give every later merge a buildtime dep on the
    # earlier libc merge for its root.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.root in implicit_libc_roots and \
            pkg.operation == 'merge':
            earlier_libc_pkgs.add(pkg)
            my_libc = libc_pkg_map.get(pkg.root)
            if my_libc is not None and \
                my_libc in earlier_libc_pkgs:
                self._digraph.add(my_libc, pkg,
                    priority=DepPriority(buildtime=True))
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): assumes at least one positional arg (the
        # unmerge status code) — confirm against unmerge() callers.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.add(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """Collect dblink instances for installed packages blocked by
    new_pkg, skipping same-slot/same-cpv replacements."""
    if self._opts_ignore_blockers.intersection(self.myopts):
    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    blocker_db = self._blocker_db[new_pkg.root]
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # Same slot or same cpv means this is a replacement, not a
        # real blocker.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))
    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Map a dblink instance to its corresponding Package object."""
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_cfg = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, tree_type, root_cfg,
        installed=(tree_type == "installed"))
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """Callback for dblink: route elog messages through the
    scheduler's task-output handler (respecting the log file)."""
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    out_str = out.getvalue()
    self._task_output(out_str, log_path=log_path)
def _dblink_emerge_log(self, msg):
    """Callback for dblink: forward a message to the emerge log."""
    self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """Callback for dblink: display a merge message, suppressing
    sub-WARN output in background mode, and mirror it to the log."""
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if not (background and level < logging.WARN):
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
    self._task_output(msg, log_path=log_path)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    # Hook phases run as plain misc functions; everything else is a
    # full ebuild phase.
    if phase in ('die_hooks', 'success_hooks'):
        ebuild_phase = MiscFunctionsProcess(background=background,
            commands=[phase], phase=phase,
            scheduler=scheduler, settings=settings)
    ebuild_phase = EbuildPhase(background=background,
        phase=phase, scheduler=scheduler,
    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    """
    if '--fetchonly' in self.myopts:
    digest = '--digest' in self.myopts
    for pkgsettings in self.pkgsettings.values():
        if pkgsettings.mycpv is not None:
            # ensure that we are using global features
            # settings rather than those from package.env
        if 'digest' in pkgsettings.features:

    # Regenerate manifests for every ebuild scheduled for merge.
    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
        pkgsettings = self.pkgsettings[x.root]
        if pkgsettings.mycpv is not None:
            # ensure that we are using global features
            # settings rather than those from package.env
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not digestgen(mysettings=pkgsettings, myportdb=portdb):
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
def _env_sanity_check(self):
    """
    Verify a sane environment before trying to build anything from source.
    """
    # Only relevant when something will actually be built.
    for x in self._mergelist:
        if isinstance(x, Package) and not x.built:
    for settings in self.pkgsettings.values():
        for var in ("ARCH", ):
            value = settings.get(var)
            if value and value.strip():
            msg = _("%(var)s is not set... "
                "Are you missing the '%(configroot)setc/make.profile' symlink? "
                "Is the symlink correct? "
                "Is your portage tree complete?") % \
                {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}

            out = portage.output.EOutput()
            for line in textwrap.wrap(msg, 70):
def _check_manifests(self):
    """Verify ebuild manifests for the merge list, using quiet
    per-root config clones."""
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:

    shown_verifying_msg = False
    # Build quiet config clones so digestcheck output stays terse.
    for myroot, pkgsettings in self.pkgsettings.items():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
        if x.operation == "uninstall":
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")
        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        ebuild_path = portdb.findname(x.cpv)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        quiet_config["O"] = os.path.dirname(ebuild_path)
        if not digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    """Queue background prefetchers for the merge list when
    parallel-fetch is enabled."""
    if not self._parallel_fetch:

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            # mergelist can contain solved Blocker instances
            if not isinstance(pkg, Package) or pkg.operation == "uninstall":
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    if not isinstance(pkg, Package):
    elif pkg.type_name == "ebuild":
        # Source package: prefetch distfiles via EbuildFetcher, using
        # the config pool so settings are allocated lazily.
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)
    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Remote binary package: prefetch the tbz2.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.

    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):

    mergelist = self._mergelist
    # A restart only happens when the portage replacement is not the
    # final item in the merge list.
    for i, pkg in enumerate(mergelist):
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage is None:
        elif pkg.cpv != self._running_portage.cpv or \
            '9999' in pkg.cpv or \
            'git' in pkg.inherited:
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):

    if not self._is_restart_necessary(pkg):

    if pkg == self._mergelist[-1]:

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \

    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    # Don't trust sys.argv[0] here because eselect-python may modify it.
    emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
    mynewargv = [emerge_binary, "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.items():
        if myopt not in bad_resume_opts:
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
def _run_pkg_pretend(self):
    """Run the pkg_pretend phase for each EAPI>=4 package in the merge
    list that defines it, using a throwaway PORTAGE_TMPDIR."""
    shown_verifying_msg = False

    for x in self._mergelist:
        if not isinstance(x, Package):
        if x.operation == "uninstall":
        # pkg_pretend exists only in EAPI 4 and later.
        if x.metadata["EAPI"] in ("0", "1", "2", "3"):
        if "pretend" not in x.metadata.defined_phases:
        if not shown_verifying_msg and self._background:
            shown_verifying_msg = True
            self._status_msg("Running pre-merge checks")
        if not self._background:
            out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
            portage.util.writemsg_stdout(out_str, noiselevel=-1)

        root_config = x.root_config
        settings = self.pkgsettings[root_config.root]
        # Use a private temp dir so the pretend phase cannot collide
        # with real build dirs; restored below.
        tmpdir = tempfile.mkdtemp()
        tmpdir_orig = settings["PORTAGE_TMPDIR"]
        settings["PORTAGE_TMPDIR"] = tmpdir

        bintree = root_config.trees["bintree"].dbapi.bintree
        if bintree.isremote(x.cpv):
            fetcher = BinpkgPrefetcher(background=self._background,
                logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=x, scheduler=self._sched_iface)

        # Unpack build-info from the binary package so its ebuild can
        # be used for the pretend phase.
        tbz2_file = bintree.getname(x.cpv)
        infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
        portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
        ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)

        portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
            "pretend", root_config.root, settings,
            debug=(settings.get("PORTAGE_DEBUG", "") == 1),
            mydbapi=self.trees[settings["ROOT"]][tree].dbapi, use_cache=1)
        prepare_build_dirs(root_config.root, settings, cleanup=0)

        vardb = root_config.trees['vartree'].dbapi
        settings["REPLACING_VERSIONS"] = " ".join(
            set(portage.versions.cpv_getversion(match) \
            for match in vardb.match(x.slot_atom) + \
            vardb.match('='+x.cpv)))
        pretend_phase = EbuildPhase(background=self._background,
            phase="pretend", scheduler=self._sched_iface,

        pretend_phase.start()
        ret = pretend_phase.wait()

        portage.elog.elog_process(x.cpv, settings)
        shutil.rmtree(tmpdir)
        settings["PORTAGE_TMPDIR"] = tmpdir_orig
# NOTE(review): this appears to be the body of the main merge() driver
# (its def line is not visible here): resume handling, per-root sanity
# checks, pre-merge validation, the merge/keep-going loop, and final
# failure reporting.
if "--resume" in self.myopts:
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")

self._save_resume_list()

    self._background = self._background_mode()
except self._unknown_internal_error:

for root in self.trees:
    root_config = self.trees[root]["root_config"]

    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()

    # In background mode, per-root settings are marked so spawned
    # processes know to suppress interactive output.
    if self._background:
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()

    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# Pre-merge validation: digests, environment, manifests, pkg_pretend.
rval = self._generate_digests()
if rval != os.EX_OK:

rval = self._env_sanity_check()
if rval != os.EX_OK:

# TODO: Immediately recalculate deps here if --keep-going
# is enabled and corrupt manifests are detected.
rval = self._check_manifests()
if rval != os.EX_OK and not keep_going:

rval = self._run_pkg_pretend()
if rval != os.EX_OK:

# Merge loop: with --keep-going, failed packages are removed from
# the resume list and the depgraph is recalculated.
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:
if "resume" not in mtimedb:
mergelist = self._mtimedb["resume"].get("mergelist")

for failed_pkg in failed_pkgs:
    mergelist.remove(list(failed_pkg.pkg))

self._failed_pkgs_all.extend(failed_pkgs)

# free some memory before creating
# the resume depgraph
self._destroy_installed_graph()

if not self._calc_resume_list():
clear_caches(self.trees)
if not self._mergelist:
# Initialize the installed graph again
# since it was destroyed above in order
self._init_installed_graph()
self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
    if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

self._failed_pkgs_all.extend(failed_pkgs)

# Final failure reporting.
printer = portage.output.EOutput()
background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        log_file = open(_unicode_encode(log_path,
            encoding=_encodings['fs'], errors='strict'), mode='rb')
        # Compressed logs are transparently decompressed for display.
        if log_path.endswith('.gz'):
            log_file = gzip.GzipFile(filename='',
                mode='rb', fileobj=log_file)

    if log_file is not None:
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        except zlib.error as e:
            writemsg_level("%s\n" % (e,), level=logging.ERROR,
        failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:

    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]

        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))

        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:

if len(self._failed_pkgs_all) > 1 or \
    (self._failed_pkgs_all and keep_going):
    if len(self._failed_pkgs_all) > 1:
        msg = "The following %d packages have " % \
            len(self._failed_pkgs_all) + \
            "failed to build or install:"
    msg = "The following package has " + \
        "failed to build or install:"

    for line in textwrap.wrap(msg, 72):
        printer.eerror(line)

    for failed_pkg in self._failed_pkgs_all:
        msg = " %s" % (colorize('INFORM', failed_pkg.pkg.__str__()),)
        log_path = self._locate_failure_log(failed_pkg)
        if log_path is not None:
            msg += ", Log file:"
1194 if log_path is not None:
1195 printer.eerror(" '%s'" % colorize('INFORM', log_path))
1198 if self._failed_pkgs_all:
1202 def _elog_listener(self, mysettings, key, logentries, fulltext):
# elog listener registered around the main loop: captures ERROR-level log
# entries emitted during a package's build/install so they can be replayed
# in the failure summary (self._failed_pkgs_die_msgs is read there).
1203 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
# NOTE(review): original line 1204 is elided in this dump (numbering jumps
# 1203 -> 1205); presumably a guard like "if errors:" -- confirm upstream.
1205 self._failed_pkgs_die_msgs.append(
1206 (mysettings, key, errors))
1208 def _locate_failure_log(self, failed_pkg):
# Find a usable build log for a failed package, presumably returning its
# path (or None) to callers such as _failed_pkg_msg and the failure
# summary. NOTE(review): this dump elides most of the body (1209,
# 1211-1212, 1214, 1216-1219, 1221+), including the checks that use
# build_dir and log_size -- do not infer the selection logic from here.
1210 build_dir = failed_pkg.build_dir
1213 log_paths = [failed_pkg.build_log]
1215 for log_path in log_paths:
# os.stat raises OSError if the log vanished; the elided surrounding code
# presumably handles that -- confirm upstream.
1220 log_size = os.stat(log_path).st_size
1231 def _add_packages(self):
# Seed the scheduler's work queue from the merge list: only Package
# instances are queued for merging; Blocker entries are handled (or
# skipped) by the elided elif body.
1232 pkg_queue = self._pkg_queue
1233 for pkg in self._mergelist:
1234 if isinstance(pkg, Package):
1235 pkg_queue.append(pkg)
1236 elif isinstance(pkg, Blocker):
# NOTE(review): the elif body (original line 1237+) is elided in this
# dump -- likely a no-op, but confirm against upstream before assuming.
1239 def _system_merge_started(self, merge):
# Start listener for merges of deep system packages (see _build_exit,
# which registers it for packages in self._deep_system_deps).
1241 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1243 graph = self._digraph
1246 pkg = merge.merge.pkg
1248 # Skip this if $ROOT != / since it shouldn't matter if there
1249 # are unsatisfied system runtime deps in this case.
1253 completed_tasks = self._completed_tasks
1254 unsatisfied = self._unsatisfied_system_deps
1256 def ignore_non_runtime_or_satisfied(priority):
# Edge filter passed to graph.child_nodes below: keeps only unsatisfied
# runtime/runtime_post DepPriority edges. NOTE(review): the return
# statements (around 1263-1264) are elided in this dump.
1258 Ignore non-runtime and satisfied runtime priorities.
1260 if isinstance(priority, DepPriority) and \
1261 not priority.satisfied and \
1262 (priority.runtime or priority.runtime_post):
1266 # When checking for unsatisfied runtime deps, only check
1267 # direct deps since indirect deps are checked when the
1268 # corresponding parent is merged.
1269 for child in graph.child_nodes(pkg,
1270 ignore_priority=ignore_non_runtime_or_satisfied):
1271 if not isinstance(child, Package) or \
1272 child.operation == 'uninstall':
# (elided: presumably "continue" for non-package / uninstall children)
1276 if child.operation == 'merge' and \
1277 child not in completed_tasks:
# A direct runtime dep that still needs merging: record it so the
# scheduler can hold back jobs until it is satisfied (see
# _schedule_tasks_imp, which checks _unsatisfied_system_deps).
1278 unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
	"""
	Exit listener for merges that were held in the merge-wait queue
	(see _schedule_tasks): unregister the finished task from the
	scheduled-wait list, then apply the normal merge-exit handling.
	"""
	scheduled = self._merge_wait_scheduled
	scheduled.remove(task)
	self._merge_exit(task)
1284 def _merge_exit(self, merge):
# Exit listener for PackageMerge tasks: run the bookkeeping in
# _do_merge_exit, return the per-task config to the pool, and update the
# status display counters.
1285 self._do_merge_exit(merge)
1286 self._deallocate_config(merge.merge.settings)
# Only count successful merges of packages that were not already
# installed toward the progress counter.
1287 if merge.returncode == os.EX_OK and \
1288 not merge.merge.pkg.installed:
1289 self._status_display.curval += 1
1290 self._status_display.merges = len(self._task_queues.merge)
# NOTE(review): original lines 1291-1292 are elided in this dump
# (numbering jumps to 1293); likely a re-schedule call -- confirm upstream.
1293 def _do_merge_exit(self, merge):
# Bookkeeping after a PackageMerge finishes: on failure record a
# _failed_pkg entry and report it; on success mark the task (and any
# replaced package's uninstall task) complete and prune the resume list.
1294 pkg = merge.merge.pkg
1295 self._running_tasks.remove(pkg)
1296 if merge.returncode != os.EX_OK:
1297 settings = merge.merge.settings
1298 build_dir = settings.get("PORTAGE_BUILDDIR")
1299 build_log = settings.get("PORTAGE_LOG_FILE")
1301 self._failed_pkgs.append(self._failed_pkg(
1302 build_dir=build_dir, build_log=build_log,
1304 returncode=merge.returncode))
1305 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
1307 self._status_display.failed = len(self._failed_pkgs)
# NOTE(review): lines 1308-1309 are elided here; presumably an early
# return so the success path below only runs for EX_OK -- confirm.
1310 self._task_complete(pkg)
1311 pkg_to_replace = merge.merge.pkg_to_replace
1312 if pkg_to_replace is not None:
1313 # When a package is replaced, mark it's uninstall
1314 # task complete (if any).
1316 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
1317 self._task_complete(uninst_hash_key)
# Restart emerge if the just-merged package was portage itself.
1322 self._restart_if_necessary(pkg)
1324 # Call mtimedb.commit() after each merge so that
1325 # --resume still works after being interrupted
1326 # by reboot, sigkill or similar.
1327 mtimedb = self._mtimedb
1328 mtimedb["resume"]["mergelist"].remove(list(pkg))
1329 if not mtimedb["resume"]["mergelist"]:
1330 del mtimedb["resume"]
# (elided: the mtimedb.commit() call referenced by the comment above)
1333 def _build_exit(self, build):
# Exit listener for build jobs. On success, wrap the build in a
# PackageMerge task; deep system deps are deferred to the merge-wait
# queue (merged only when no builds are running), others are queued for
# merging immediately. On failure, record a _failed_pkg entry.
1334 if build.returncode == os.EX_OK:
1336 merge = PackageMerge(merge=build)
1337 if not build.build_opts.buildpkgonly and \
1338 build.pkg in self._deep_system_deps:
1339 # Since dependencies on system packages are frequently
1340 # unspecified, merge them only when no builds are executing.
1341 self._merge_wait_queue.append(merge)
1342 merge.addStartListener(self._system_merge_started)
# (elided line 1343: presumably the else branch for the normal path)
1344 merge.addExitListener(self._merge_exit)
1345 self._task_queues.merge.add(merge)
1346 self._status_display.merges = len(self._task_queues.merge)
# Failure path (the "else:" at original line 1347 is elided in this dump).
1348 self._running_tasks.remove(build.pkg)
1349 settings = build.settings
1350 build_dir = settings.get("PORTAGE_BUILDDIR")
1351 build_log = settings.get("PORTAGE_LOG_FILE")
1353 self._failed_pkgs.append(self._failed_pkg(
1354 build_dir=build_dir, build_log=build_log,
1356 returncode=build.returncode))
1357 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
1359 self._status_display.failed = len(self._failed_pkgs)
1360 self._deallocate_config(build.settings)
1362 self._status_display.running = self._jobs
# (elided: job-count decrement and re-schedule, lines 1361/1363-1364)
1365 def _extract_exit(self, build):
# Binary-package extraction shares the build exit handling (see the
# extract branch in _schedule_tasks_imp, which registers this listener).
1366 self._build_exit(build)
def _task_complete(self, pkg):
	"""
	Record that a task has finished: clear the early-return latch so
	_choose_pkg will scan the queue again, drop the package from the
	unsatisfied-system-deps set, and add it to the completed set.
	"""
	self._choose_pkg_return_early = False
	self._unsatisfied_system_deps.discard(pkg)
	self._completed_tasks.add(pkg)
# NOTE(review): the enclosing "def" (around original line 1373) is elided
# in this dump -- the lines below are the interior of the merge-driver
# method that runs the main loop. It registers the elog listener and
# silences lock messages in background mode, then restores both and
# derives the return value from the last failed package.
1375 self._add_prefetchers()
1376 self._add_packages()
1377 pkg_queue = self._pkg_queue
1378 failed_pkgs = self._failed_pkgs
1379 portage.locks._quiet = self._background
1380 portage.elog.add_listener(self._elog_listener)
# (elided lines 1381-1385: presumably a try/finally around the main loop)
1386 self._main_loop_cleanup()
1387 portage.locks._quiet = False
1388 portage.elog.remove_listener(self._elog_listener)
# The exit status reflects the most recent failure, if any.
1390 rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
	"""
	Reset all per-run scheduling state after the main loop finishes:
	empty the package queue and tracking sets, reset the early-return
	latch and status display, drop the dependency graph, and clear any
	pending fetch tasks.
	"""
	# Empty the list in place (py2-compatible; list.clear() is py3-only).
	del self._pkg_queue[:]
	self._choose_pkg_return_early = False
	self._digraph = None
	for tracked_set in (self._completed_tasks,
		self._deep_system_deps,
		self._unsatisfied_system_deps):
		tracked_set.clear()
	self._status_display.reset()
	self._task_queues.fetch.clear()
1404 def _choose_pkg(self):
# Pick the next package from self._pkg_queue whose dependencies are
# satisfied, or set _choose_pkg_return_early when no candidate can be
# chosen until a running job finishes. NOTE(review): several return
# statements are elided in this dump (1410-1411, 1432-1434, 1438-1442,
# 1451-1453) -- the exact return values cannot be read from here.
1406 Choose a task that has all it's dependencies satisfied.
1409 if self._choose_pkg_return_early:
1412 if self._digraph is None:
# --nodeps with parallel jobs: don't pop while work is scheduled, since
# there is no graph to order the queue.
1413 if self._is_work_scheduled() and \
1414 not ("--nodeps" in self.myopts and \
1415 (self._max_jobs is True or self._max_jobs > 1)):
1416 self._choose_pkg_return_early = True
1418 return self._pkg_queue.pop(0)
# With no jobs running, the head of the queue is always safe to take.
1420 if not self._is_work_scheduled():
1421 return self._pkg_queue.pop(0)
1423 self._prune_digraph()
1427 # Prefer uninstall operations when available.
1428 graph = self._digraph
1429 for pkg in self._pkg_queue:
1430 if pkg.operation == 'uninstall' and \
1431 not graph.child_nodes(pkg):
# (elided: selection of this pkg as chosen_pkg)
1435 if chosen_pkg is None:
# "later" packages will be merged after pkg regardless, so dependence on
# them should not delay pkg (see _dependent_on_scheduled_merges).
1436 later = set(self._pkg_queue)
1437 for pkg in self._pkg_queue:
1439 if not self._dependent_on_scheduled_merges(pkg, later):
# (elided: selection of this pkg as chosen_pkg)
1443 if chosen_pkg is not None:
1444 self._pkg_queue.remove(chosen_pkg)
1446 if chosen_pkg is None:
1447 # There's no point in searching for a package to
1448 # choose until at least one of the existing jobs
1450 self._choose_pkg_return_early = True
1454 def _dependent_on_scheduled_merges(self, pkg, later):
# Iterative depth-first traversal of pkg's dependency subgraph, looking
# for any node that still represents a scheduled merge. NOTE(review):
# the loop header (presumably "while node_stack:", original line 1477)
# and the return statements are elided in this dump.
1456 Traverse the subgraph of the given packages deep dependencies
1457 to see if it contains any scheduled merges.
1458 @param pkg: a package to check dependencies for
1460 @param later: packages for which dependence should be ignored
1461 since they will be merged later than pkg anyway and therefore
1462 delaying the merge of pkg will not result in a more optimal
1466 @returns: True if the package is dependent, False otherwise.
1469 graph = self._digraph
1470 completed_tasks = self._completed_tasks
1473 traversed_nodes = set([pkg])
1474 direct_deps = graph.child_nodes(pkg)
# node_stack aliases the direct_deps list before it is re-bound to a
# frozenset: the stack starts as the (mutable) direct children, while
# the frozenset supports the membership test below.
1475 node_stack = direct_deps
1476 direct_deps = frozenset(direct_deps)
1478 node = node_stack.pop()
1479 if node in traversed_nodes:
# (elided: "continue")
1481 traversed_nodes.add(node)
# A node counts as a scheduled merge unless it is an installed no-op, an
# indirect uninstall, already completed, or (per the elided condition at
# line 1486+) presumably in "later" -- confirm upstream.
1482 if not ((node.installed and node.operation == "nomerge") or \
1483 (node.operation == "uninstall" and \
1484 node not in direct_deps) or \
1485 node in completed_tasks or \
1490 # Don't traverse children of uninstall nodes since
1491 # those aren't dependencies in the usual sense.
1492 if node.operation != "uninstall":
1493 node_stack.extend(graph.child_nodes(node))
1497 def _allocate_config(self, root):
# Pop a pooled config for this root, or clone a fresh one when the pool
# is empty (the "else:" at original line 1504 is elided in this dump).
# Paired with _deallocate_config, which returns instances to the pool.
1499 Allocate a unique config instance for a task in order
1500 to prevent interference between parallel tasks.
1502 if self._config_pool[root]:
1503 temp_settings = self._config_pool[root].pop()
1505 temp_settings = portage.config(clone=self.pkgsettings[root])
1506 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1507 # performance reasons, call it here to make sure all settings from the
1508 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1509 temp_settings.reload()
1510 temp_settings.reset()
1511 return temp_settings
def _deallocate_config(self, settings):
	"""
	Return a config instance to the per-ROOT pool so that a later call
	to _allocate_config can reuse it instead of cloning a new one.
	"""
	root = settings["ROOT"]
	self._config_pool[root].append(settings)
1516 def _main_loop(self):
# Drive scheduling until _schedule() reports no more work. NOTE(review):
# the poll/iteration bodies (original lines 1523, 1526-1529, 1531,
# 1533-1534) are elided in this dump -- presumably poll-loop handling.
1518 # Only allow 1 job max if a restart is scheduled
1519 # due to portage update.
1520 if self._is_restart_scheduled() or \
1521 self._opts_no_background.intersection(self.myopts):
1522 self._set_max_jobs(1)
1524 while self._schedule():
1525 if self._poll_event_handlers:
# (elided: poll-event processing)
1530 if not self._is_work_scheduled():
# (elided: presumably break out of the drain loop)
1532 if self._poll_event_handlers:
def _keep_scheduling(self):
	"""
	Return True while the package queue still has entries, unless a
	package has already failed and this is not a fetch-only run (in
	which case scheduling of new jobs stops).
	"""
	if not self._pkg_queue:
		return False
	if self._failed_pkgs and not self._build_opts.fetchonly:
		return False
	return True
1539 def _is_work_scheduled(self):
# True while any build/merge task is still in flight.
1540 return bool(self._running_tasks)
1542 def _schedule_tasks(self):
# One scheduling pass: flush the merge-wait queue when no build jobs are
# running, schedule new jobs, refresh the display, and cancel pending
# prefetchers once a failure means no new work will start. NOTE(review):
# the loop construct around this body and several lines (1543-1545, 1554,
# 1557-1558, 1560-1562, 1569-1570, 1573-1574) are elided in this dump.
1546 # When the number of jobs drops to zero, process all waiting merges.
1547 if not self._jobs and self._merge_wait_queue:
1548 for task in self._merge_wait_queue:
1549 task.addExitListener(self._merge_wait_exit_handler)
1550 self._task_queues.merge.add(task)
1551 self._status_display.merges = len(self._task_queues.merge)
1552 self._merge_wait_scheduled.extend(self._merge_wait_queue)
1553 del self._merge_wait_queue[:]
1555 self._schedule_tasks_imp()
1556 self._status_display.display()
1559 for q in self._task_queues.values():
# (elided: per-queue scheduling / state-change accumulation)
1563 # Cancel prefetchers if they're the only reason
1564 # the main poll loop is still running.
1565 if self._failed_pkgs and not self._build_opts.fetchonly and \
1566 not self._is_work_scheduled() and \
1567 self._task_queues.fetch:
1568 self._task_queues.fetch.clear()
# Loop again only while something changed or merges are still waiting.
1571 if not (state_change or \
1572 (not self._jobs and self._merge_wait_queue)):
# (elided: presumably break)
1575 return self._keep_scheduling()
1577 def _job_delay(self):
# Throttle job starts when --load-average is in effect: the delay grows
# with the number of running jobs (factor * jobs ** exp, capped at
# _job_delay_max). NOTE(review): the return statements (1591-1594) are
# elided in this dump.
1580 @returns: True if job scheduling should be delayed, False otherwise.
1583 if self._jobs and self._max_load is not None:
1585 current_time = time.time()
1587 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1588 if delay > self._job_delay_max:
1589 delay = self._job_delay_max
# Delay until enough wall-clock time has passed since the last job start.
1590 if (current_time - self._previous_job_start_time) < delay:
1595 def _schedule_tasks_imp(self):
# Inner scheduling step: while scheduling is allowed, pick a package via
# _choose_pkg and start the matching task. NOTE(review): this dump elides
# the enclosing loop header and several branch lines (1599-1604, 1607,
# 1612, 1614, 1616, 1618-1620, 1623, 1626-1627, 1631-1633, 1638-1640,
# 1645) -- the branch structure below is partially reconstructed from
# indentation-free context and should be confirmed upstream.
1598 @returns: True if state changed, False otherwise.
1605 if not self._keep_scheduling():
1606 return bool(state_change)
# Hold back when: a previous pass decided to wait, merges are scheduled
# from the wait queue, system deps are unsatisfied while jobs run, the
# job limit is reached, or (elided condition at 1612) presumably
# _job_delay() -- confirm.
1608 if self._choose_pkg_return_early or \
1609 self._merge_wait_scheduled or \
1610 (self._jobs and self._unsatisfied_system_deps) or \
1611 not self._can_add_job() or \
1613 return bool(state_change)
1615 pkg = self._choose_pkg()
1617 return bool(state_change)
1621 if not pkg.installed:
1622 self._pkg_count.curval += 1
1624 task = self._task(pkg)
1625 self._running_tasks.add(pkg)
# Already-installed packages skip the build step: wrap directly in a
# PackageMerge and put it at the front of the merge queue.
1628 merge = PackageMerge(merge=task)
1629 merge.addExitListener(self._merge_exit)
1630 self._task_queues.merge.addFront(merge)
# Binary-package branch: extraction jobs get the extract exit listener.
1634 self._previous_job_start_time = time.time()
1635 self._status_display.running = self._jobs
1636 task.addExitListener(self._extract_exit)
1637 self._task_queues.jobs.add(task)
# Ebuild branch: ordinary build jobs get the build exit listener.
1641 self._previous_job_start_time = time.time()
1642 self._status_display.running = self._jobs
1643 task.addExitListener(self._build_exit)
1644 self._task_queues.jobs.add(task)
1646 return bool(state_change)
1648 def _task(self, pkg):
# Build a MergeListItem task for pkg, resolving the currently-installed
# package in the same slot (if any) as pkg_to_replace so the merge can
# mark its uninstall complete later (see _do_merge_exit).
1650 pkg_to_replace = None
1651 if pkg.operation != "uninstall":
1652 vardb = pkg.root_config.trees["vartree"].dbapi
1653 previous_cpv = vardb.match(pkg.slot_atom)
# NOTE(review): the guard around this pop (original line 1654, likely
# "if previous_cpv:") is elided in this dump.
1655 previous_cpv = previous_cpv.pop()
1656 pkg_to_replace = self._pkg(previous_cpv,
1657 "installed", pkg.root_config, installed=True)
# The task gets its own config via _allocate_config plus a _ConfigPool
# handle so sub-tasks can allocate/deallocate more instances.
1659 task = MergeListItem(args_set=self._args_set,
1660 background=self._background, binpkg_opts=self._binpkg_opts,
1661 build_opts=self._build_opts,
1662 config_pool=self._ConfigPool(pkg.root,
1663 self._allocate_config, self._deallocate_config),
1664 emerge_opts=self.myopts,
1665 find_blockers=self._find_blockers(pkg), logger=self._logger,
1666 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
1667 pkg_to_replace=pkg_to_replace,
1668 prefetcher=self._prefetchers.get(pkg),
1669 scheduler=self._sched_iface,
1670 settings=self._allocate_config(pkg.root),
1671 statusMessage=self._status_msg,
1672 world_atom=self._world_atom)
# (elided: the "return task" statement, original lines 1673-1674)
1676 def _failed_pkg_msg(self, failed_pkg, action, preposition):
# Emit a one-line status message for a failed package, e.g.
# "Failed to emerge foo/bar-1.0", followed by its log file path when one
# can be located. NOTE(review): lines 1680, 1682 and 1687 are elided in
# this dump; 1680 presumably guards the ROOT suffix below -- confirm.
1677 pkg = failed_pkg.pkg
1678 msg = "%s to %s %s" % \
1679 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
1681 msg += " %s %s" % (preposition, pkg.root)
1683 log_path = self._locate_failure_log(failed_pkg)
1684 if log_path is not None:
1685 msg += ", Log file:"
1686 self._status_msg(msg)
1688 if log_path is not None:
1689 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1691 def _status_msg(self, msg):
# Show a single-line status message through the JobStatusDisplay; in
# foreground mode a newline is written first so the message does not
# collide with other terminal output.
1693 Display a brief status message (no newlines) in the status display.
1694 This is called by tasks to provide feedback to the user. This
1695 delegates the resposibility of generating \r and \n control characters,
1696 to guarantee that lines are created or erased when necessary and
1700 @param msg: a brief status message (no newlines allowed)
1702 if not self._background:
1703 writemsg_level("\n")
1704 self._status_display.displayMessage(msg)
1706 def _save_resume_list(self):
# Persist the current merge list into mtimedb["resume"] so that an
# interrupted run can be continued with --resume. Only Package entries
# with operation == "merge" are stored, serialized as plain lists.
1708 Do this before verifying the ebuild Manifests since it might
1709 be possible for the user to use --resume --skipfirst get past
1710 a non-essential package with a broken digest.
1712 mtimedb = self._mtimedb
1714 mtimedb["resume"] = {}
1715 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1716 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1717 # a list type for options.
1718 mtimedb["resume"]["myopts"] = self.myopts.copy()
1720 # Convert Atom instances to plain str.
1721 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
1722 mtimedb["resume"]["mergelist"] = [list(x) \
1723 for x in self._mergelist \
1724 if isinstance(x, Package) and x.operation == "merge"]
# (elided: the mtimedb.commit() call, original lines 1725+ -- presumably)
1728 def _calc_resume_list(self):
# Rebuild the merge list from the saved resume data, dropping packages
# whose dependencies can no longer be satisfied. NOTE(review): this dump
# elides many lines (1729, 1732, 1734, 1736, 1738-1740, 1747-1748,
# 1751-1752, 1758-1759, 1761, 1763, 1768-1769, 1772, 1782, 1784-1785,
# 1788, 1790, 1792-1793, 1795, 1797, 1803-1804, 1807-1808, 1810-1811,
# 1819, 1823-1825) including the failure/return paths.
1730 Use the current resume list to calculate a new one,
1731 dropping any packages with unsatisfied deps.
1733 @returns: True if successful, False otherwise.
1735 print(colorize("GOOD", "*** Resuming merge..."))
1737 myparams = create_depgraph_params(self.myopts, None)
1741 success, mydepgraph, dropped_tasks = resume_depgraph(
1742 self.settings, self.trees, self._mtimedb, self.myopts,
1743 myparams, self._spinner)
1744 except depgraph.UnsatisfiedResumeDep as exc:
1745 # rename variable to avoid python-3.0 error:
1746 # SyntaxError: can not delete variable 'e' referenced in nested
# NOTE(review): the rename this comment describes (presumably "e = exc",
# original lines 1747-1748) is elided in this dump, which is why "e"
# appears unbound on the next line -- confirm against upstream.
1749 mydepgraph = e.depgraph
1750 dropped_tasks = set()
# Deferred error report, run after mod_echo output is flushed.
1753 def unsatisfied_resume_dep_msg():
1754 mydepgraph.display_problems()
1755 out = portage.output.EOutput()
1756 out.eerror("One or more packages are either masked or " + \
1757 "have missing dependencies:")
1760 show_parents = set()
# Report each unsatisfied dep once per parent package.
1762 if dep.parent in show_parents:
1764 show_parents.add(dep.parent)
1765 if dep.atom is None:
1766 out.eerror(indent + "Masked package:")
1767 out.eerror(2 * indent + str(dep.parent))
1770 out.eerror(indent + str(dep.atom) + " pulled in by:")
1771 out.eerror(2 * indent + str(dep.parent))
1773 msg = "The resume list contains packages " + \
1774 "that are either masked or have " + \
1775 "unsatisfied dependencies. " + \
1776 "Please restart/continue " + \
1777 "the operation manually, or use --skipfirst " + \
1778 "to skip the first package in the list and " + \
1779 "any other packages that may be " + \
1780 "masked or have missing dependencies."
1781 for line in textwrap.wrap(msg, 72):
1783 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# Show the recalculated list when the options ask for it (_show_list).
1786 if success and self._show_list():
1787 mylist = mydepgraph.altlist()
1789 if "--tree" in self.myopts:
1791 mydepgraph.display(mylist, favorites=self._favorites)
1794 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
1796 mydepgraph.display_problems()
# Break references so the depgraph itself can be garbage collected while
# the scheduler keeps the resulting merge list and digraph.
1798 mylist = mydepgraph.altlist()
1799 mydepgraph.break_refs(mylist)
1800 mydepgraph.break_refs(dropped_tasks)
1801 self._mergelist = mylist
1802 self._set_digraph(mydepgraph.schedulerGraph())
# With --keep-going, dropped merges are logged per-package via elog and
# recorded as failures so the final summary mentions them.
1805 for task in dropped_tasks:
1806 if not (isinstance(task, Package) and task.operation == "merge"):
1809 msg = "emerge --keep-going:" + \
1812 msg += " for %s" % (pkg.root,)
1813 msg += " dropped due to unsatisfied dependency."
1814 for line in textwrap.wrap(msg, msg_width):
1815 eerror(line, phase="other", key=pkg.cpv)
1816 settings = self.pkgsettings[pkg.root]
1817 # Ensure that log collection from $T is disabled inside
1818 # elog_process(), since any logs that might exist are
1820 settings.pop("T", None)
1821 portage.elog.elog_process(pkg.cpv, settings)
1822 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
1826 def _show_list(self):
# Whether the recalculated merge list should be displayed: yes for
# --ask/--tree/--verbose runs that are not --quiet. NOTE(review): the
# return statements (original lines 1831-1833) are elided in this dump.
1827 myopts = self.myopts
1828 if "--quiet" not in myopts and \
1829 ("--ask" in myopts or "--tree" in myopts or \
1830 "--verbose" in myopts):
1834 def _world_atom(self, pkg):
# Update the "selected" (world) set for pkg: remove entries on
# uninstall, add an atom on merge when the package was explicitly
# requested. NOTE(review): this dump elides many lines (1835, 1838-1839,
# 1841, 1844-1845, 1847-1848, 1851-1852, 1859-1862, 1865, 1869, 1873,
# 1875, 1881-1882, 1885-1888) including the early returns, the world
# lock acquisition/release, and the world_set.add() call itself.
1836 Add or remove the package to the world file, but only if
1837 it's supposed to be added or removed. Otherwise, do nothing.
# Options that must never modify the world file.
1840 if set(("--buildpkgonly", "--fetchonly",
1842 "--oneshot", "--onlydeps",
1843 "--pretend")).intersection(self.myopts):
1846 if pkg.root != self.target_root:
1849 args_set = self._args_set
# Only packages that were explicitly requested on the command line (or
# via a set argument) are recorded.
1850 if not args_set.findAtomForPackage(pkg):
1853 logger = self._logger
1854 pkg_count = self._pkg_count
1855 root_config = pkg.root_config
1856 world_set = root_config.sets["selected"]
1857 world_locked = False
1858 if hasattr(world_set, "lock"):
1863 if hasattr(world_set, "load"):
1864 world_set.load() # maybe it's changed on disk
1866 if pkg.operation == "uninstall":
1867 if hasattr(world_set, "cleanPackage"):
1868 world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
1870 if hasattr(world_set, "remove"):
# Also drop any package-set references that are being unmerged.
1871 for s in pkg.root_config.setconfig.active:
1872 world_set.remove(SETPREFIX+s)
1874 atom = create_world_atom(pkg, args_set, root_config)
1876 if hasattr(world_set, "add"):
1877 self._status_msg(('Recording %s in "world" ' + \
1878 'favorites file...') % atom)
1879 logger.log(" === (%s of %s) Updating world file (%s)" % \
1880 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
# Fallback when the selected set does not support add().
1883 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
1884 (atom,), level=logging.WARN, noiselevel=-1)
1889 def _pkg(self, cpv, type_name, root_config, installed=False):
# Resolve a Package instance for cpv: reuse the one already present in
# the scheduler digraph when available, otherwise build a fresh one from
# the matching tree's dbapi metadata. NOTE(review): lines 1890,
# 1894-1897, 1899 and 1904-1906 are elided in this dump (including the
# return of the digraph hit and the installed-operation branch).
1891 Get a package instance from the cache, or create a new
1892 one if necessary. Raises KeyError from aux_get if it
1893 failures for some reason (package does not exist or is
1898 operation = "nomerge"
1900 if self._digraph is not None:
1901 # Reuse existing instance when available.
1902 pkg = self._digraph.get(
1903 (type_name, root_config.root, cpv, operation))
1907 tree_type = depgraph.pkg_tree_map[type_name]
1908 db = root_config.trees[tree_type].dbapi
1909 db_keys = list(self.trees[root_config.root][
1910 tree_type].dbapi._aux_cache_keys)
# NOTE(review): under Python 3 zip() returns an iterator, not a list of
# pairs; this py2-era code passes its result as Package metadata --
# verify the Package constructor accepts an iterator before porting.
1911 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
1912 return Package(built=(type_name != 'ebuild'),
1913 cpv=cpv, metadata=metadata,
1914 root_config=root_config, installed=installed)