1 # Copyright 1999-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
6 from collections import deque
19 from portage import os
20 from portage import _encodings
21 from portage import _unicode_decode, _unicode_encode
22 from portage.cache.mappings import slot_dict_class
23 from portage.elog.messages import eerror
24 from portage.localization import _
25 from portage.output import colorize, create_color_func, red
26 bad = create_color_func("BAD")
27 from portage._sets import SETPREFIX
28 from portage._sets.base import InternalPackageSet
29 from portage.util import ensure_dirs, writemsg, writemsg_level
30 from portage.package.ebuild.digestcheck import digestcheck
31 from portage.package.ebuild.digestgen import digestgen
32 from portage.package.ebuild.doebuild import (_check_temp_dir,
34 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
37 from _emerge.BinpkgFetcher import BinpkgFetcher
38 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
39 from _emerge.BinpkgVerifier import BinpkgVerifier
40 from _emerge.Blocker import Blocker
41 from _emerge.BlockerDB import BlockerDB
42 from _emerge.clear_caches import clear_caches
43 from _emerge.create_depgraph_params import create_depgraph_params
44 from _emerge.create_world_atom import create_world_atom
45 from _emerge.DepPriority import DepPriority
46 from _emerge.depgraph import depgraph, resume_depgraph
47 from _emerge.EbuildBuildDir import EbuildBuildDir
48 from _emerge.EbuildFetcher import EbuildFetcher
49 from _emerge.EbuildPhase import EbuildPhase
50 from _emerge.emergelog import emergelog
51 from _emerge.FakeVartree import FakeVartree
52 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
53 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
54 from _emerge.JobStatusDisplay import JobStatusDisplay
55 from _emerge.MergeListItem import MergeListItem
56 from _emerge.Package import Package
57 from _emerge.PackageMerge import PackageMerge
58 from _emerge.PollScheduler import PollScheduler
59 from _emerge.SlotObject import SlotObject
60 from _emerge.SequentialTaskQueue import SequentialTaskQueue
# Python 3 compatibility shim. NOTE(review): the body of this branch is
# elided in this copy (upstream aliases basestring = str here) -- confirm
# against the upstream file before relying on it.
if sys.hexversion >= 0x3000000:
class Scheduler(PollScheduler):

	# max time between display status updates (milliseconds)
	_max_display_latency = 3000

	# Command line options under which blocker conflicts are ignored
	# entirely (no merge is actually performed, or deps are ignored).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground operation regardless of job count.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options that make the special portage self-update path unnecessary.
	_opts_no_self_update = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Scheduler-specific task interface, extending the base
	# PollScheduler interface with fetch/setup/unpack scheduling hooks.
	# NOTE(review): the opening of the __slots__ tuple appears to be
	# elided in this copy -- confirm against upstream.
	class _iface_class(PollScheduler._sched_iface_class):
		"scheduleSetup", "scheduleUnpack")

	# Interface handed to fetch tasks: the shared parallel-fetch log
	# plus a callable used to schedule fetchers (see _schedule_fetch).
	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	# Slot-dict factory producing one attribute per task queue name.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
	# Booleans derived from build-related command line options; slot
	# names map to "--slot-name" options (see __init__).
	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	# Booleans derived from binary-package-related options.
	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	# Merge progress counter: curval merges completed out of maxval.
	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")
102 class _emerge_log_class(SlotObject):
103 __slots__ = ("xterm_titles",)
105 def log(self, *pargs, **kwargs):
106 if not self.xterm_titles:
107 # Avoid interference with the scheduler's status display.
108 kwargs.pop("short_msg", None)
109 emergelog(self.xterm_titles, *pargs, **kwargs)
	# Record of one failed package: its build dir, build log path, the
	# Package instance, and the failing return code.
	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			# NOTE(review): the self._root = root assignment appears
			# to be elided in this copy -- confirm against upstream.
			self._allocate = allocate
			self._deallocate = deallocate
			# NOTE(review): an allocate() method definition appears to
			# be elided here; the following return belongs to it.
			return self._allocate(self._root)
		def deallocate(self, settings):
			# Hand a previously allocated config back to the pool.
			self._deallocate(settings)
	# Raised internally to abort scheduling after the real cause has
	# already been written to stderr. NOTE(review): the docstring
	# delimiters appear to be elided in this copy.
	class _unknown_internal_error(portage.exception.PortageException):
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist=None, favorites=None, graph_config=None):
		# Build scheduler state: option flags, task queues, the status
		# display, per-root config pools, and the dependency graph.
		# NOTE(review): several lines of this constructor appear to be
		# elided in this copy (e.g. self.trees / self.myopts
		# assignments) -- confirm against upstream.
		PollScheduler.__init__(self)

		# mergelist is deprecated; graph_config carries it now.
		if mergelist is not None:
			warnings.warn("The mergelist parameter of the " + \
				"_emerge.Scheduler constructor is now unused. Use " + \
				"the graph_config parameter instead.",
				DeprecationWarning, stacklevel=2)

		self.settings = settings
		self.target_root = settings["EROOT"]
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites, allow_repo=True)

		# Each _build_opts slot mirrors the "--slot-name" option.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._build_opts.buildpkg_exclude = InternalPackageSet( \
			initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
			allow_wildcard=True, allow_repo=True)

		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self._logger = self._emerge_log_class()
		# One SequentialTaskQueue per queue name (merge, jobs, ...).
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified. For example, see
		self._merge_wait_queue = deque()
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		# Refresh the status display periodically.
		self.sched_iface.timeout_add(self._max_display_latency,
			self._status_display.display)
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		self._set_max_jobs(max_jobs)
		self._running_root = trees[trees._running_eroot]["root_config"]

		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.pkgsettings = {}
		# Per-root pool of reusable config instances (see _ConfigPool).
		self._config_pool = {}
		for root in self.trees:
			self._config_pool[root] = []

		# Shared log used to serialize parallel-fetch output.
		self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			fetch=fetch_iface, output=self._task_output,
			idle_add=self._event_loop.idle_add,
			io_add_watch=self._event_loop.io_add_watch,
			iteration=self._event_loop.iteration,
			register=self._event_loop.io_add_watch,
			schedule=self._event_loop._poll_loop,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			source_remove=self._event_loop.source_remove,
			timeout_add=self._event_loop.timeout_add,
			unregister=self._event_loop.source_remove)

		# Weak refs so finished prefetchers can be garbage collected.
		self._prefetchers = weakref.WeakValueDictionary()
		self._running_tasks = {}
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		self._init_graph(graph_config)
		merge_count = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		self._choose_pkg_return_early = False

		features = self.settings.features
		# parallel-fetch needs distlocks to be safe; warn and fall back
		# to serial fetching when distlocks is disabled.
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif merge_count > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
				open(self._fetch_log, 'w').close()
			except EnvironmentError:

		# Cache the currently-installed portage package, used by
		# _handle_self_update() to detect a portage upgrade.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
	def _handle_self_update(self):
		# Detect a pending merge of portage itself into the running
		# root and prepare for self-update. NOTE(review): the early
		# return/continue statements of the guard conditions appear to
		# be elided in this copy -- confirm against upstream.
		if self._opts_no_self_update.intersection(self.myopts):
		for x in self._mergelist:
			if not isinstance(x, Package):
			if x.operation != "merge":
			if x.root != self._running_root.root:
			if not portage.dep.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [x]):
			# Re-prepare when the version differs or a live (git)
			# ebuild is involved, since contents may have changed.
			if self._running_portage is None or \
				self._running_portage.cpv != x.cpv or \
				'git' in x.inherited or \
				'git-2' in x.inherited:
				rval = _check_temp_dir(self.settings)
				_prepare_self_update(self.settings)
	def _terminate_tasks(self):
		# Silence the status display and tear down all running tasks
		# and queued work. NOTE(review): the per-task cancel and the
		# queue clear statements appear to be elided in this copy.
		self._status_display.quiet = True
		while self._running_tasks:
			task_id, task = self._running_tasks.popitem()
		for q in self._task_queues.values():
	def _init_graph(self, graph_config):
		Initialization structures used for dependency calculations
		involving currently installed packages.
		self._set_graph_config(graph_config)
		self._blocker_db = {}
		dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
		# One BlockerDB per root, backed by a FakeVartree: either a
		# fresh one, or the one already built for the depgraph.
		for root in self.trees:
			if graph_config is None:
				fake_vartree = FakeVartree(self.trees[root]["root_config"],
					pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps)
			# NOTE(review): an else branch appears to be elided here.
			fake_vartree = graph_config.trees[root]['vartree']
			self._blocker_db[root] = BlockerDB(fake_vartree)
	def _destroy_graph(self):
		Use this to free memory at the beginning of _calc_resume_list().
		After _calc_resume_list(), the _init_graph() method
		must be called in order to re-generate the structures that
		this method destroys.
		self._blocker_db = None
		self._set_graph_config(None)
358 def _set_max_jobs(self, max_jobs):
359 self._max_jobs = max_jobs
360 self._task_queues.jobs.max_jobs = max_jobs
361 if "parallel-install" in self.settings.features:
362 self._task_queues.merge.max_jobs = max_jobs
	def _background_mode(self):
		Check if background mode is enabled and adjust states as necessary.
		@returns: True if background mode is enabled, False otherwise.
		# Background when parallel jobs or quiet output are requested,
		# unless an option in _opts_no_background forces foreground.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts \
			or self.myopts.get("--quiet-build") == "y") and \
			not bool(self._opts_no_background.intersection(self.myopts))

		# Interactive packages need stdio and force --jobs=1.
		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root_config.settings["ROOT"] != "/":
					pkg_str += " for " + pkg.root
			# NOTE(review): construction of msg from pkg_str appears
			# to be elided in this copy -- confirm against upstream.
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)
				writemsg_level(">>> In order to temporarily mask " + \
					"interactive updates, you may\n" + \
					">>> specify --accept-properties=-interactive\n",
					level=logging.INFO, noiselevel=-1)
		# Quiet the status display when output is quiet anyway.
		self._status_display.quiet = \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		# Collect merge-list packages whose PROPERTIES include
		# "interactive" (they require stdio and serialized execution).
		interactive_tasks = []
		for task in self._mergelist:
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			if 'interactive' in task.metadata.properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_graph_config(self, graph_config):
		# Install (or clear) the depgraph-derived state: package cache,
		# digraph, and merge list.
		if graph_config is None:
			self._graph_config = None
			self._deep_system_deps.clear()

		self._graph_config = graph_config
		self._pkg_cache = graph_config.pkg_cache
		self._digraph = graph_config.graph
		self._mergelist = graph_config.mergelist

		# With --nodeps or no parallelism the graph is not needed for
		# scheduling, so free it to save memory.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			graph_config.graph = None
			graph_config.pkg_cache.clear()
			self._deep_system_deps.clear()
			for pkg in self._mergelist:
				self._pkg_cache[pkg] = pkg

		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
		if '--debug' in self.myopts:
			writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
			self._digraph.debug_print()
			writemsg("\n", noiselevel=-1)
	def _find_system_deps(self):
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		NOTE: This can only find deep system deps if the system set has been
		added to the graph and traversed deeply (the depgraph "complete"
		parameter will do this, triggered by emerge --complete-graph option).
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		# Only packages that are actually being merged are relevant.
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		Prune any root nodes that are irrelevant.
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		# Repeatedly strip irrelevant roots (non-packages, installed
		# nomerge nodes, completed tasks) until a pass removes nothing.
		# NOTE(review): the enclosing while-loop header and the break
		# statement appear to be elided in this copy.
			for node in graph.root_nodes():
				if not isinstance(node, Package) or \
					(node.installed and node.operation == "nomerge") or \
					node in completed_tasks:
					removed_nodes.add(node)
				graph.difference_update(removed_nodes)
			if not removed_nodes:
			removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of its build dir locks from
		# NOTE(review): the initialization of cpv_map appears to be
		# elided in this copy -- confirm against upstream.
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Serialize same-cpv merges by making each later merge
			# depend on every earlier one.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# First positional argument, when given, is the exit status.
			# NOTE(review): the guard around this assignment appears to
			# be elided in this copy -- confirm against upstream.
			self.status = pargs[0]
	def _schedule_fetch(self, fetcher):
		Schedule a fetcher, in order to control the number of concurrent
		fetchers. If self._max_jobs is greater than 1 then the fetch
		queue is bypassed and the fetcher is started immediately,
		otherwise it is added to the front of the parallel-fetch queue.
		NOTE: The parallel-fetch queue is currently used to serialize
		access to the parallel-fetch log, so changes in the log handling
		would be required before it would be possible to enable
		concurrent fetching within the parallel-fetch queue.
		if self._max_jobs > 1:
			# NOTE(review): the immediate-start branch appears to be
			# elided in this copy -- confirm against upstream.
			self._task_queues.fetch.addFront(fetcher)
	def _schedule_setup(self, setup_phase):
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		if self._task_queues.merge.max_jobs > 1 and \
			"ebuild-locks" in self.settings.features:
			# Use a separate queue for ebuild-locks when the merge
			# queue allows more than 1 job (due to parallel-install),
			# since the portage.locks module does not behave as desired
			# if we try to lock the same file multiple times
			# concurrently from the same process.
			self._task_queues.ebuild_locks.add(setup_phase)
			# NOTE(review): the else branch separating this fallback
			# appears to be elided in this copy.
			self._task_queues.merge.add(setup_phase)
	def _schedule_unpack(self, unpack_phase):
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		self._task_queues.unpack.add(unpack_phase)
	def _find_blockers(self, new_pkg):
		# Thin wrapper around _find_blockers_impl (upstream wraps it
		# for logging/accounting; surrounding lines elided here).
		return self._find_blockers_impl(new_pkg)
	def _find_blockers_impl(self, new_pkg):
		# Build dblink instances for installed packages blocked by
		# new_pkg, skipping the package's own slot/version.
		# NOTE(review): the initialization of blocker_dblinks and the
		# continue statements appear to be elided in this copy.
		if self._opts_ignore_blockers.intersection(self.myopts):
		blocker_db = self._blocker_db[new_pkg.root]
		for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))
		return blocker_dblinks
	def _generate_digests(self):
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must be done before parallel
		if '--fetchonly' in self.myopts:
		digest = '--digest' in self.myopts
		# Detect FEATURES=digest in any per-root global settings.
		for pkgsettings in self.pkgsettings.values():
			if pkgsettings.mycpv is not None:
				# ensure that we are using global features
				# settings rather than those from package.env
			if 'digest' in pkgsettings.features:
		# Regenerate the manifest for each from-source ebuild merge.
		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
			pkgsettings = self.pkgsettings[x.root]
			if pkgsettings.mycpv is not None:
				# ensure that we are using global features
				# settings rather than those from package.env
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
			if ebuild_path is None:
				raise AssertionError("ebuild not found for '%s'" % x.cpv)
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not digestgen(mysettings=pkgsettings, myportdb=portdb):
				"!!! Unable to generate manifest for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
	def _env_sanity_check(self):
		Verify a sane environment before trying to build anything from source.
		# Only relevant when at least one package builds from source.
		for x in self._mergelist:
			if isinstance(x, Package) and not x.built:
		# Require critical variables (currently just ARCH) to be set.
		for settings in self.pkgsettings.values():
			for var in ("ARCH", ):
				value = settings.get(var)
				if value and value.strip():
				msg = _("%(var)s is not set... "
					"Are you missing the '%(configroot)setc/make.profile' symlink? "
					"Is the symlink correct? "
					"Is your portage tree complete?") % \
					{"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}

				out = portage.output.EOutput()
				for line in textwrap.wrap(msg, 70):
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:

		shown_verifying_msg = False
		# Quiet per-root configs so digestcheck output stays terse.
		# NOTE(review): the quiet_settings dict initialization appears
		# to be elided in this copy -- confirm against upstream.
		for myroot, pkgsettings in self.pkgsettings.items():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
			if x.operation == "uninstall":
			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")
			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
			if ebuild_path is None:
				raise AssertionError("ebuild not found for '%s'" % x.cpv)
			quiet_config["O"] = os.path.dirname(ebuild_path)
			if not digestcheck([], quiet_config, strict=True):
	def _add_prefetchers(self):
		# Queue a background fetcher for every fetchable merge-list
		# entry when parallel-fetch is active.
		if not self._parallel_fetch:

		if self._parallel_fetch:
			self._status_msg("Starting parallel fetch")

			prefetchers = self._prefetchers

			for pkg in self._mergelist:
				# mergelist can contain solved Blocker instances
				if not isinstance(pkg, Package) or pkg.operation == "uninstall":
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					self._task_queues.fetch.add(prefetcher)
					prefetchers[pkg] = prefetcher

			# Start the first prefetcher immediately so that self._task()
			# won't discard it. This avoids a case where the first
			# prefetcher is discarded, causing the second prefetcher to
			# occupy the fetch queue before the first fetcher has an
			# opportunity to execute.
			self._task_queues.fetch.schedule()
	def _create_prefetcher(self, pkg):
		@return: a prefetcher, or None if not applicable
		if not isinstance(pkg, Package):
		# Source packages get an EbuildFetcher with a pooled config.
		elif pkg.type_name == "ebuild":
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)
		# Remote binary packages get a BinpkgPrefetcher.
		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)
	def _run_pkg_pretend(self):
		Since pkg_pretend output may be important, this method sends all
		output directly to stdout (regardless of options like --quiet or
		# Use a local PollScheduler instance here, since we don't
		# want tasks here to trigger the usual Scheduler callbacks
		# that handle job scheduling and status display.
		sched_iface = PollScheduler().sched_iface

		for x in self._mergelist:
			if not isinstance(x, Package):
			if x.operation == "uninstall":
			# pkg_pretend was introduced in EAPI 4; skip older EAPIs.
			if x.metadata["EAPI"] in ("0", "1", "2", "3"):
			if "pretend" not in x.metadata.defined_phases:

			out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
			portage.util.writemsg_stdout(out_str, noiselevel=-1)

			root_config = x.root_config
			settings = self.pkgsettings[root_config.root]

			# setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
			# have to validate it for each package
			rval = _check_temp_dir(settings)

			build_dir_path = os.path.join(
				os.path.realpath(settings["PORTAGE_TMPDIR"]),
				"portage", x.category, x.pf)
			existing_buildir = os.path.isdir(build_dir_path)
			settings["PORTAGE_BUILDDIR"] = build_dir_path
			build_dir = EbuildBuildDir(scheduler=sched_iface,
				# Clean up the existing build dir, in case pkg_pretend
				# checks for available space (bug #390711).
				infloc = os.path.join(build_dir_path, "build-info")
				ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
				portdb = root_config.trees["porttree"].dbapi
				ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
				if ebuild_path is None:
					raise AssertionError(
						"ebuild not found for '%s'" % x.cpv)
				portage.package.ebuild.doebuild.doebuild_environment(
					ebuild_path, "clean", settings=settings,
					db=self.trees[settings['EROOT']][tree].dbapi)
				clean_phase = EbuildPhase(background=False,
					phase='clean', scheduler=sched_iface, settings=settings)
				current_task = clean_phase

			# Binary packages: fetch/verify the tbz2, then unpack its
			# build-info so the pretend phase can run from it.
			bintree = root_config.trees["bintree"].dbapi.bintree
				# Display fetch on stdout, so that it's always clear what
				# is consuming time here.
				if bintree.isremote(x.cpv):
					fetcher = BinpkgFetcher(pkg=x,
						scheduler=sched_iface)
					if fetcher.wait() != os.EX_OK:
					fetched = fetcher.pkg_path
				verifier = BinpkgVerifier(pkg=x,
					scheduler=sched_iface)
				current_task = verifier
				if verifier.wait() != os.EX_OK:
					bintree.inject(x.cpv, filename=fetched)
				tbz2_file = bintree.getname(x.cpv)
				infloc = os.path.join(build_dir_path, "build-info")
				portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
				ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
				settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
				settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
				# Source packages: locate the ebuild in the tree.
				portdb = root_config.trees["porttree"].dbapi
				ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
				if ebuild_path is None:
					raise AssertionError("ebuild not found for '%s'" % x.cpv)
				settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
				if self._build_opts.buildpkgonly:
					settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
					settings.configdict["pkg"]["MERGE_TYPE"] = "source"

			portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
				"pretend", settings=settings,
				db=self.trees[settings['EROOT']][tree].dbapi)
			prepare_build_dirs(root_config.root, settings, cleanup=0)

			vardb = root_config.trees['vartree'].dbapi
			# Versions being replaced by this merge (same slot or
			# exact cpv), exposed to the ebuild environment.
			settings["REPLACING_VERSIONS"] = " ".join(
				set(portage.versions.cpv_getversion(match) \
				for match in vardb.match(x.slot_atom) + \
				vardb.match('='+x.cpv)))
			pretend_phase = EbuildPhase(
				phase="pretend", scheduler=sched_iface,
			current_task = pretend_phase
			pretend_phase.start()
			ret = pretend_phase.wait()
			portage.elog.elog_process(x.cpv, settings)
			# Cancel any still-running task and clean the build dir.
			if current_task is not None and current_task.isAlive():
				current_task.cancel()
				clean_phase = EbuildPhase(background=False,
					phase='clean', scheduler=sched_iface, settings=settings)
		# NOTE(review): the def line of this method (upstream: merge())
		# appears to be elided in this copy, along with many interior
		# lines -- confirm against upstream before editing.
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

			self._background = self._background_mode()
		except self._unknown_internal_error:

		rval = self._handle_self_update()

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()

			# Propagate background mode into each root's settings.
			root_config.settings.unlock()
			root_config.settings["PORTAGE_BACKGROUND"] = "1"
			root_config.settings.backup_changes("PORTAGE_BACKGROUND")
			root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		# Pre-merge validation passes; each returns os.EX_OK on success.
		rval = self._generate_digests()

		rval = self._env_sanity_check()

		# TODO: Immediately recalculate deps here if --keep-going
		# is enabled and corrupt manifests are detected.
		rval = self._check_manifests()
		if rval != os.EX_OK and not keep_going:

		rval = self._run_pkg_pretend()

		# Install signal handlers so an interrupt is recorded and
		# converted into a clean exit after tasks are torn down.
		def sighandler(signum, frame):
			signal.signal(signal.SIGINT, signal.SIG_IGN)
			signal.signal(signal.SIGTERM, signal.SIG_IGN)
			portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
			received_signal.append(128 + signum)

		earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
		earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)

			rval = self._merge()
			# Restore previous handlers
			if earlier_sigint_handler is not None:
				signal.signal(signal.SIGINT, earlier_sigint_handler)
				signal.signal(signal.SIGINT, signal.SIG_DFL)
			if earlier_sigterm_handler is not None:
				signal.signal(signal.SIGTERM, earlier_sigterm_handler)
				signal.signal(signal.SIGTERM, signal.SIG_DFL)

			sys.exit(received_signal[0])

		if rval == os.EX_OK or fetchonly or not keep_going:
		if "resume" not in mtimedb:
		mergelist = self._mtimedb["resume"].get("mergelist")

		# With --keep-going, drop failed packages from the resume list
		# and recompute what can still be merged.
		for failed_pkg in failed_pkgs:
			mergelist.remove(list(failed_pkg.pkg))

		self._failed_pkgs_all.extend(failed_pkgs)

		if not self._calc_resume_list():

		clear_caches(self.trees)
		if not self._mergelist:

		self._save_resume_list()
		self._pkg_count.curval = 0
		self._pkg_count.maxval = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		self._failed_pkgs_all.extend(failed_pkgs)

		printer = portage.output.EOutput()
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			log_file_real = None

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
					log_file = open(_unicode_encode(log_path,
						encoding=_encodings['fs'], errors='strict'), mode='rb')
					# Transparently decompress gzipped build logs.
					if log_path.endswith('.gz'):
						log_file_real = log_file
						log_file = gzip.GzipFile(filename='',
							mode='rb', fileobj=log_file)

			if log_file is not None:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				except zlib.error as e:
					writemsg_level("%s\n" % (e,), level=logging.ERROR,
				if log_file_real is not None:
					log_file_real.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]

				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))

				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:

		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and keep_going):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
				msg = "The following package has " + \
					"failed to build or install:"

			for line in textwrap.wrap(msg, 72):
				printer.eerror(line)

			for failed_pkg in self._failed_pkgs_all:
				# Use _unicode_decode() to force unicode format string so
				# that Package.__unicode__() is called in python2.
				msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
				log_path = self._locate_failure_log(failed_pkg)
				if log_path is not None:
					msg += ", Log file:"

				if log_path is not None:
					printer.eerror(" '%s'" % colorize('INFORM', log_path))

		if self._failed_pkgs_all:
def _elog_listener(self, mysettings, key, logentries, fulltext):
	# Elog listener registered for the duration of the merge loop:
	# collect ERROR-level log entries so they can be replayed in the
	# failure summary after the loop finishes.
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	# NOTE(review): lines appear elided from this copy; an "if errors:"
	# guard around this append may be missing -- confirm against the
	# full source before relying on every entry being non-empty.
	self._failed_pkgs_die_msgs.append(
		(mysettings, key, errors))
1169 def _locate_failure_log(self, failed_pkg):
1171 log_paths = [failed_pkg.build_log]
1173 for log_path in log_paths:
1178 log_size = os.stat(log_path).st_size
def _add_packages(self):
	"""
	Seed the scheduler's package queue from the computed merge list.

	Package instances are queued to be built/merged. Blocker entries
	carry no work of their own at this point, so they are skipped.
	"""
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
			# Nothing to enqueue for blockers; they are tracked via
			# the blocker db, not scheduled as tasks. (The body of
			# this branch was missing in the reviewed copy, leaving
			# a dangling "elif" -- restore the explicit no-op.)
			pass
1197 def _system_merge_started(self, merge):
1199 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1200 In general, this keeps track of installed system packages with
1201 unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
1202 a fragile situation, so we don't execute any unrelated builds until
1203 the circular dependencies are built and installed.
1205 graph = self._digraph
1208 pkg = merge.merge.pkg
1210 # Skip this if $ROOT != / since it shouldn't matter if there
1211 # are unsatisfied system runtime deps in this case.
1212 if pkg.root_config.settings["ROOT"] != "/":
1215 completed_tasks = self._completed_tasks
1216 unsatisfied = self._unsatisfied_system_deps
1218 def ignore_non_runtime_or_satisfied(priority):
1220 Ignore non-runtime and satisfied runtime priorities.
1222 if isinstance(priority, DepPriority) and \
1223 not priority.satisfied and \
1224 (priority.runtime or priority.runtime_post):
1228 # When checking for unsatisfied runtime deps, only check
1229 # direct deps since indirect deps are checked when the
1230 # corresponding parent is merged.
1231 for child in graph.child_nodes(pkg,
1232 ignore_priority=ignore_non_runtime_or_satisfied):
1233 if not isinstance(child, Package) or \
1234 child.operation == 'uninstall':
1238 if child.operation == 'merge' and \
1239 child not in completed_tasks:
1240 unsatisfied.add(child)
1242 def _merge_wait_exit_handler(self, task):
1243 self._merge_wait_scheduled.remove(task)
1244 self._merge_exit(task)
def _merge_exit(self, merge):
	"""
	Exit listener for a PackageMerge task: drop it from the running
	task table, record success/failure, and return its settings
	instance to the config pool.
	"""
	self._running_tasks.pop(id(merge), None)
	self._do_merge_exit(merge)
	# Hand the per-task config instance back for reuse.
	self._deallocate_config(merge.merge.settings)
	# Only successful merges of packages that were not already
	# installed advance the "N of M" progress counter.
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
	# NOTE(review): lines appear elided from this copy; a trailing
	# call to reschedule pending work may be missing here -- confirm
	# against the full source.
1256 def _do_merge_exit(self, merge):
1257 pkg = merge.merge.pkg
1258 if merge.returncode != os.EX_OK:
1259 settings = merge.merge.settings
1260 build_dir = settings.get("PORTAGE_BUILDDIR")
1261 build_log = settings.get("PORTAGE_LOG_FILE")
1263 self._failed_pkgs.append(self._failed_pkg(
1264 build_dir=build_dir, build_log=build_log,
1266 returncode=merge.returncode))
1267 if not self._terminated_tasks:
1268 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
1269 self._status_display.failed = len(self._failed_pkgs)
1272 self._task_complete(pkg)
1273 pkg_to_replace = merge.merge.pkg_to_replace
1274 if pkg_to_replace is not None:
1275 # When a package is replaced, mark it's uninstall
1276 # task complete (if any).
1277 if self._digraph is not None and \
1278 pkg_to_replace in self._digraph:
1280 self._pkg_queue.remove(pkg_to_replace)
1283 self._task_complete(pkg_to_replace)
1285 self._pkg_cache.pop(pkg_to_replace, None)
1290 # Call mtimedb.commit() after each merge so that
1291 # --resume still works after being interrupted
1292 # by reboot, sigkill or similar.
1293 mtimedb = self._mtimedb
1294 mtimedb["resume"]["mergelist"].remove(list(pkg))
1295 if not mtimedb["resume"]["mergelist"]:
1296 del mtimedb["resume"]
1299 def _build_exit(self, build):
1300 self._running_tasks.pop(id(build), None)
1301 if build.returncode == os.EX_OK and self._terminated_tasks:
1302 # We've been interrupted, so we won't
1303 # add this to the merge queue.
1305 self._deallocate_config(build.settings)
1306 elif build.returncode == os.EX_OK:
1308 merge = PackageMerge(merge=build)
1309 self._running_tasks[id(merge)] = merge
1310 if not build.build_opts.buildpkgonly and \
1311 build.pkg in self._deep_system_deps:
1312 # Since dependencies on system packages are frequently
1313 # unspecified, merge them only when no builds are executing.
1314 self._merge_wait_queue.append(merge)
1315 merge.addStartListener(self._system_merge_started)
1317 merge.addExitListener(self._merge_exit)
1318 self._task_queues.merge.add(merge)
1319 self._status_display.merges = len(self._task_queues.merge)
1321 settings = build.settings
1322 build_dir = settings.get("PORTAGE_BUILDDIR")
1323 build_log = settings.get("PORTAGE_LOG_FILE")
1325 self._failed_pkgs.append(self._failed_pkg(
1326 build_dir=build_dir, build_log=build_log,
1328 returncode=build.returncode))
1329 if not self._terminated_tasks:
1330 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
1331 self._status_display.failed = len(self._failed_pkgs)
1332 self._deallocate_config(build.settings)
1334 self._status_display.running = self._jobs
1337 def _extract_exit(self, build):
1338 self._build_exit(build)
1340 def _task_complete(self, pkg):
1341 self._completed_tasks.add(pkg)
1342 self._unsatisfied_system_deps.discard(pkg)
1343 self._choose_pkg_return_early = False
1344 blocker_db = self._blocker_db[pkg.root]
1345 blocker_db.discardBlocker(pkg)
1349 self._add_prefetchers()
1350 self._add_packages()
1351 failed_pkgs = self._failed_pkgs
1352 portage.locks._quiet = self._background
1353 portage.elog.add_listener(self._elog_listener)
1359 self._main_loop_cleanup()
1360 portage.locks._quiet = False
1361 portage.elog.remove_listener(self._elog_listener)
1363 rval = failed_pkgs[-1].returncode
1367 def _main_loop_cleanup(self):
1368 del self._pkg_queue[:]
1369 self._completed_tasks.clear()
1370 self._deep_system_deps.clear()
1371 self._unsatisfied_system_deps.clear()
1372 self._choose_pkg_return_early = False
1373 self._status_display.reset()
1374 self._digraph = None
1375 self._task_queues.fetch.clear()
1376 self._prefetchers.clear()
1378 def _choose_pkg(self):
1380 Choose a task that has all its dependencies satisfied. This is used
1381 for parallel build scheduling, and ensures that we don't build
1382 anything with deep dependencies that have yet to be merged.
1385 if self._choose_pkg_return_early:
1388 if self._digraph is None:
1389 if self._is_work_scheduled() and \
1390 not ("--nodeps" in self.myopts and \
1391 (self._max_jobs is True or self._max_jobs > 1)):
1392 self._choose_pkg_return_early = True
1394 return self._pkg_queue.pop(0)
1396 if not self._is_work_scheduled():
1397 return self._pkg_queue.pop(0)
1399 self._prune_digraph()
1403 # Prefer uninstall operations when available.
1404 graph = self._digraph
1405 for pkg in self._pkg_queue:
1406 if pkg.operation == 'uninstall' and \
1407 not graph.child_nodes(pkg):
1411 if chosen_pkg is None:
1412 later = set(self._pkg_queue)
1413 for pkg in self._pkg_queue:
1415 if not self._dependent_on_scheduled_merges(pkg, later):
1419 if chosen_pkg is not None:
1420 self._pkg_queue.remove(chosen_pkg)
1422 if chosen_pkg is None:
1423 # There's no point in searching for a package to
1424 # choose until at least one of the existing jobs
1426 self._choose_pkg_return_early = True
1430 def _dependent_on_scheduled_merges(self, pkg, later):
1432 Traverse the subgraph of the given packages deep dependencies
1433 to see if it contains any scheduled merges.
1434 @param pkg: a package to check dependencies for
1436 @param later: packages for which dependence should be ignored
1437 since they will be merged later than pkg anyway and therefore
1438 delaying the merge of pkg will not result in a more optimal
1442 @returns: True if the package is dependent, False otherwise.
1445 graph = self._digraph
1446 completed_tasks = self._completed_tasks
1449 traversed_nodes = set([pkg])
1450 direct_deps = graph.child_nodes(pkg)
1451 node_stack = direct_deps
1452 direct_deps = frozenset(direct_deps)
1454 node = node_stack.pop()
1455 if node in traversed_nodes:
1457 traversed_nodes.add(node)
1458 if not ((node.installed and node.operation == "nomerge") or \
1459 (node.operation == "uninstall" and \
1460 node not in direct_deps) or \
1461 node in completed_tasks or \
1466 # Don't traverse children of uninstall nodes since
1467 # those aren't dependencies in the usual sense.
1468 if node.operation != "uninstall":
1469 node_stack.extend(graph.child_nodes(node))
1473 def _allocate_config(self, root):
1475 Allocate a unique config instance for a task in order
1476 to prevent interference between parallel tasks.
1478 if self._config_pool[root]:
1479 temp_settings = self._config_pool[root].pop()
1481 temp_settings = portage.config(clone=self.pkgsettings[root])
1482 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1483 # performance reasons, call it here to make sure all settings from the
1484 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1485 temp_settings.reload()
1486 temp_settings.reset()
1487 return temp_settings
1489 def _deallocate_config(self, settings):
1490 self._config_pool[settings['EROOT']].append(settings)
1492 def _main_loop(self):
1494 if self._opts_no_background.intersection(self.myopts):
1495 self._set_max_jobs(1)
1497 while self._schedule():
1498 self.sched_iface.run()
1502 if not self._is_work_scheduled():
1504 self.sched_iface.run()
1506 def _keep_scheduling(self):
1507 return bool(not self._terminated_tasks and self._pkg_queue and \
1508 not (self._failed_pkgs and not self._build_opts.fetchonly))
1510 def _is_work_scheduled(self):
1511 return bool(self._running_tasks)
1513 def _schedule_tasks(self):
1519 # When the number of jobs and merges drops to zero,
1520 # process a single merge from _merge_wait_queue if
1521 # it's not empty. We only process one since these are
1522 # special packages and we want to ensure that
1523 # parallel-install does not cause more than one of
1524 # them to install at the same time.
1525 if (self._merge_wait_queue and not self._jobs and
1526 not self._task_queues.merge):
1527 task = self._merge_wait_queue.popleft()
1528 task.addExitListener(self._merge_wait_exit_handler)
1529 self._merge_wait_scheduled.append(task)
1530 self._task_queues.merge.add(task)
1531 self._status_display.merges = len(self._task_queues.merge)
1534 if self._schedule_tasks_imp():
1537 self._status_display.display()
1539 # Cancel prefetchers if they're the only reason
1540 # the main poll loop is still running.
1541 if self._failed_pkgs and not self._build_opts.fetchonly and \
1542 not self._is_work_scheduled() and \
1543 self._task_queues.fetch:
1544 self._task_queues.fetch.clear()
1547 if not (state_change or \
1548 (self._merge_wait_queue and not self._jobs and
1549 not self._task_queues.merge)):
1552 return self._keep_scheduling()
1554 def _job_delay(self):
1557 @returns: True if job scheduling should be delayed, False otherwise.
1560 if self._jobs and self._max_load is not None:
1562 current_time = time.time()
1564 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1565 if delay > self._job_delay_max:
1566 delay = self._job_delay_max
1567 if (current_time - self._previous_job_start_time) < delay:
1572 def _schedule_tasks_imp(self):
1575 @returns: True if state changed, False otherwise.
1582 if not self._keep_scheduling():
1583 return bool(state_change)
1585 if self._choose_pkg_return_early or \
1586 self._merge_wait_scheduled or \
1587 (self._jobs and self._unsatisfied_system_deps) or \
1588 not self._can_add_job() or \
1590 return bool(state_change)
1592 pkg = self._choose_pkg()
1594 return bool(state_change)
1598 if not pkg.installed:
1599 self._pkg_count.curval += 1
1601 task = self._task(pkg)
1604 merge = PackageMerge(merge=task)
1605 self._running_tasks[id(merge)] = merge
1606 merge.addExitListener(self._merge_exit)
1607 self._task_queues.merge.addFront(merge)
1611 self._previous_job_start_time = time.time()
1612 self._status_display.running = self._jobs
1613 self._running_tasks[id(task)] = task
1614 task.addExitListener(self._extract_exit)
1615 self._task_queues.jobs.add(task)
1619 self._previous_job_start_time = time.time()
1620 self._status_display.running = self._jobs
1621 self._running_tasks[id(task)] = task
1622 task.addExitListener(self._build_exit)
1623 self._task_queues.jobs.add(task)
1625 return bool(state_change)
1627 def _task(self, pkg):
1629 pkg_to_replace = None
1630 if pkg.operation != "uninstall":
1631 vardb = pkg.root_config.trees["vartree"].dbapi
1632 previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
1633 if portage.cpv_getkey(x) == pkg.cp]
1634 if not previous_cpv and vardb.cpv_exists(pkg.cpv):
1635 # same cpv, different SLOT
1636 previous_cpv = [pkg.cpv]
1638 previous_cpv = previous_cpv.pop()
1639 pkg_to_replace = self._pkg(previous_cpv,
1640 "installed", pkg.root_config, installed=True,
1641 operation="uninstall")
1643 prefetcher = self._prefetchers.pop(pkg, None)
1644 if prefetcher is not None and not prefetcher.isAlive():
1646 self._task_queues.fetch._task_queue.remove(prefetcher)
1651 task = MergeListItem(args_set=self._args_set,
1652 background=self._background, binpkg_opts=self._binpkg_opts,
1653 build_opts=self._build_opts,
1654 config_pool=self._ConfigPool(pkg.root,
1655 self._allocate_config, self._deallocate_config),
1656 emerge_opts=self.myopts,
1657 find_blockers=self._find_blockers(pkg), logger=self._logger,
1658 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
1659 pkg_to_replace=pkg_to_replace,
1660 prefetcher=prefetcher,
1661 scheduler=self._sched_iface,
1662 settings=self._allocate_config(pkg.root),
1663 statusMessage=self._status_msg,
1664 world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""
	Show a one-line failure notice in the status display, followed by
	the log file location when one can be found.

	@param failed_pkg: a _failed_pkg record
	@param action: verb for the message (e.g. "emerge", "install")
	@param preposition: joins the action to a non-default root
		(e.g. "for", "to")
	"""
	pkg = failed_pkg.pkg
	label = bad("Failed")
	target = colorize("INFORM", pkg.cpv)
	msg = "%s to %s %s" % (label, action, target)
	# Mention the target root only when it isn't the default.
	if pkg.root_config.settings["ROOT"] != "/":
		msg += " %s %s" % (preposition, pkg.root)
	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		msg += ", Log file:"
	self._status_msg(msg)
	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1683 def _status_msg(self, msg):
1685 Display a brief status message (no newlines) in the status display.
1686 This is called by tasks to provide feedback to the user. This
1687 delegates the resposibility of generating \r and \n control characters,
1688 to guarantee that lines are created or erased when necessary and
1692 @param msg: a brief status message (no newlines allowed)
1694 if not self._background:
1695 writemsg_level("\n")
1696 self._status_display.displayMessage(msg)
1698 def _save_resume_list(self):
1700 Do this before verifying the ebuild Manifests since it might
1701 be possible for the user to use --resume --skipfirst get past
1702 a non-essential package with a broken digest.
1704 mtimedb = self._mtimedb
1706 mtimedb["resume"] = {}
1707 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1708 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1709 # a list type for options.
1710 mtimedb["resume"]["myopts"] = self.myopts.copy()
1712 # Convert Atom instances to plain str.
1713 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
1714 mtimedb["resume"]["mergelist"] = [list(x) \
1715 for x in self._mergelist \
1716 if isinstance(x, Package) and x.operation == "merge"]
1720 def _calc_resume_list(self):
1722 Use the current resume list to calculate a new one,
1723 dropping any packages with unsatisfied deps.
1725 @returns: True if successful, False otherwise.
1727 print(colorize("GOOD", "*** Resuming merge..."))
1729 # free some memory before creating
1730 # the resume depgraph
1731 self._destroy_graph()
1733 myparams = create_depgraph_params(self.myopts, None)
1737 success, mydepgraph, dropped_tasks = resume_depgraph(
1738 self.settings, self.trees, self._mtimedb, self.myopts,
1739 myparams, self._spinner)
1740 except depgraph.UnsatisfiedResumeDep as exc:
1741 # rename variable to avoid python-3.0 error:
1742 # SyntaxError: can not delete variable 'e' referenced in nested
1745 mydepgraph = e.depgraph
1746 dropped_tasks = set()
1749 def unsatisfied_resume_dep_msg():
1750 mydepgraph.display_problems()
1751 out = portage.output.EOutput()
1752 out.eerror("One or more packages are either masked or " + \
1753 "have missing dependencies:")
1756 show_parents = set()
1758 if dep.parent in show_parents:
1760 show_parents.add(dep.parent)
1761 if dep.atom is None:
1762 out.eerror(indent + "Masked package:")
1763 out.eerror(2 * indent + str(dep.parent))
1766 out.eerror(indent + str(dep.atom) + " pulled in by:")
1767 out.eerror(2 * indent + str(dep.parent))
1769 msg = "The resume list contains packages " + \
1770 "that are either masked or have " + \
1771 "unsatisfied dependencies. " + \
1772 "Please restart/continue " + \
1773 "the operation manually, or use --skipfirst " + \
1774 "to skip the first package in the list and " + \
1775 "any other packages that may be " + \
1776 "masked or have missing dependencies."
1777 for line in textwrap.wrap(msg, 72):
1779 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
1782 if success and self._show_list():
1783 mylist = mydepgraph.altlist()
1785 if "--tree" in self.myopts:
1787 mydepgraph.display(mylist, favorites=self._favorites)
1790 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
1792 mydepgraph.display_problems()
1793 self._init_graph(mydepgraph.schedulerGraph())
1796 for task in dropped_tasks:
1797 if not (isinstance(task, Package) and task.operation == "merge"):
1800 msg = "emerge --keep-going:" + \
1802 if pkg.root_config.settings["ROOT"] != "/":
1803 msg += " for %s" % (pkg.root,)
1804 msg += " dropped due to unsatisfied dependency."
1805 for line in textwrap.wrap(msg, msg_width):
1806 eerror(line, phase="other", key=pkg.cpv)
1807 settings = self.pkgsettings[pkg.root]
1808 # Ensure that log collection from $T is disabled inside
1809 # elog_process(), since any logs that might exist are
1811 settings.pop("T", None)
1812 portage.elog.elog_process(pkg.cpv, settings)
1813 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
1817 def _show_list(self):
1818 myopts = self.myopts
1819 if "--quiet" not in myopts and \
1820 ("--ask" in myopts or "--tree" in myopts or \
1821 "--verbose" in myopts):
1825 def _world_atom(self, pkg):
1827 Add or remove the package to the world file, but only if
1828 it's supposed to be added or removed. Otherwise, do nothing.
1831 if set(("--buildpkgonly", "--fetchonly",
1833 "--oneshot", "--onlydeps",
1834 "--pretend")).intersection(self.myopts):
1837 if pkg.root != self.target_root:
1840 args_set = self._args_set
1841 if not args_set.findAtomForPackage(pkg):
1844 logger = self._logger
1845 pkg_count = self._pkg_count
1846 root_config = pkg.root_config
1847 world_set = root_config.sets["selected"]
1848 world_locked = False
1849 if hasattr(world_set, "lock"):
1854 if hasattr(world_set, "load"):
1855 world_set.load() # maybe it's changed on disk
1857 if pkg.operation == "uninstall":
1858 if hasattr(world_set, "cleanPackage"):
1859 world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
1861 if hasattr(world_set, "remove"):
1862 for s in pkg.root_config.setconfig.active:
1863 world_set.remove(SETPREFIX+s)
1865 atom = create_world_atom(pkg, args_set, root_config)
1867 if hasattr(world_set, "add"):
1868 self._status_msg(('Recording %s in "world" ' + \
1869 'favorites file...') % atom)
1870 logger.log(" === (%s of %s) Updating world file (%s)" % \
1871 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
1874 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
1875 (atom,), level=logging.WARN, noiselevel=-1)
1880 def _pkg(self, cpv, type_name, root_config, installed=False,
1881 operation=None, myrepo=None):
1883 Get a package instance from the cache, or create a new
1884 one if necessary. Raises KeyError from aux_get if it
1885 failures for some reason (package does not exist or is
1889 # Reuse existing instance when available.
1890 pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
1891 type_name=type_name, repo_name=myrepo, root_config=root_config,
1892 installed=installed, operation=operation))
1897 tree_type = depgraph.pkg_tree_map[type_name]
1898 db = root_config.trees[tree_type].dbapi
1899 db_keys = list(self.trees[root_config.root][
1900 tree_type].dbapi._aux_cache_keys)
1901 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
1902 pkg = Package(built=(type_name != "ebuild"),
1903 cpv=cpv, installed=installed, metadata=metadata,
1904 root_config=root_config, type_name=type_name)
1905 self._pkg_cache[pkg] = pkg