1 # Copyright 1999-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
19 from portage import StringIO
20 from portage import os
21 from portage import _encodings
22 from portage import _unicode_decode, _unicode_encode
23 from portage.cache.mappings import slot_dict_class
24 from portage.const import LIBC_PACKAGE_ATOM
25 from portage.elog.messages import eerror
26 from portage.localization import _
27 from portage.output import colorize, create_color_func, red
28 bad = create_color_func("BAD")
29 from portage._sets import SETPREFIX
30 from portage._sets.base import InternalPackageSet
31 from portage.util import writemsg, writemsg_level
32 from portage.package.ebuild.digestcheck import digestcheck
33 from portage.package.ebuild.digestgen import digestgen
34 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
36 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
37 from _emerge.Blocker import Blocker
38 from _emerge.BlockerDB import BlockerDB
39 from _emerge.clear_caches import clear_caches
40 from _emerge.create_depgraph_params import create_depgraph_params
41 from _emerge.create_world_atom import create_world_atom
42 from _emerge.DepPriority import DepPriority
43 from _emerge.depgraph import depgraph, resume_depgraph
44 from _emerge.EbuildFetcher import EbuildFetcher
45 from _emerge.EbuildPhase import EbuildPhase
46 from _emerge.emergelog import emergelog, _emerge_log_dir
47 from _emerge.FakeVartree import FakeVartree
48 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
49 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
50 from _emerge.JobStatusDisplay import JobStatusDisplay
51 from _emerge.MergeListItem import MergeListItem
52 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
53 from _emerge.Package import Package
54 from _emerge.PackageMerge import PackageMerge
55 from _emerge.PollScheduler import PollScheduler
56 from _emerge.RootConfig import RootConfig
57 from _emerge.SlotObject import SlotObject
58 from _emerge.SequentialTaskQueue import SequentialTaskQueue
# Python 3 compatibility aliases follow here in the full file
# (the guarded body is elided from this chunk).
if sys.hexversion >= 0x3000000:
class Scheduler(PollScheduler):

    # max time between display status updates (milliseconds)
    _max_display_latency = 3000

    # With any of these options, installed blockers are not checked
    # before merging.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # With any of these options, background mode is never enabled.
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # With any of these options, emerge will not re-exec itself after
    # a portage upgrade.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that must not leak into an automatic --resume invocation
    # (see _restart_if_necessary).
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])

    # Shared log file used to serialize parallel-fetch output.
    _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
class _iface_class(SlotObject):
    # Bundle of scheduler callbacks handed to dblink/merge tasks so
    # they can schedule work and route output through the scheduler.
    # NOTE(review): the slot tuple appears truncated in this chunk (no
    # closing parenthesis visible); __init__ passes an "unregister"
    # keyword, so an "unregister" slot presumably follows -- confirm
    # against the full file.
    __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
        "dblinkElog", "dblinkEmergeLog", "fetch",
        "output", "register", "schedule",
        "scheduleSetup", "scheduleUnpack", "scheduleYield",
class _fetch_iface_class(SlotObject):
    # Interface given to fetch tasks: the shared fetch log path and a
    # callable used to schedule a fetcher on the fetch queue.
    __slots__ = ("log_file", "schedule")
# Slot-dict type holding one task queue per category; instantiated in
# __init__ with a SequentialTaskQueue per key.
_task_queues_class = slot_dict_class(
    ("merge", "jobs", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
    # Boolean build-related command line options; populated in
    # __init__ from the corresponding "--" options in myopts.
    __slots__ = ("buildpkg", "buildpkgonly",
        "fetch_all_uri", "fetchonly", "pretend")
class _binpkg_opts_class(SlotObject):
    # Boolean binary-package-related command line options; populated
    # in __init__ from the corresponding "--" options in myopts.
    __slots__ = ("fetchonly", "getbinpkg", "pretend")
class _pkg_count_class(SlotObject):
    # Merge progress counter: curval packages merged out of maxval.
    __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
    """Thin wrapper around emergelog() that honors the scheduler's
    xterm-titles setting."""
    __slots__ = ("xterm_titles",)

    def log(self, *pargs, **kwargs):
        """Forward a message to emergelog()."""
        titles_enabled = self.xterm_titles
        if not titles_enabled:
            # A short_msg would be shown as an xterm title, which
            # would interfere with the scheduler's status display.
            kwargs.pop("short_msg", None)
        emergelog(titles_enabled, *pargs, **kwargs)
class _failed_pkg(SlotObject):
    # Record of a failed merge: build directory, build log path, the
    # Package object and the failing returncode.
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")
120 class _ConfigPool(object):
121 """Interface for a task to temporarily allocate a config
122 instance from a pool. This allows a task to be constructed
123 long before the config instance actually becomes needed, like
124 when prefetchers are constructed for the whole merge list."""
125 __slots__ = ("_root", "_allocate", "_deallocate")
126 def __init__(self, root, allocate, deallocate):
128 self._allocate = allocate
129 self._deallocate = deallocate
131 return self._allocate(self._root)
132 def deallocate(self, settings):
133 self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist=None, favorites=None, graph_config=None):
    # NOTE(review): several lines of this constructor (e.g. the
    # self.trees / self.myopts assignments and some try/except
    # scaffolding) appear to be elided from this chunk.
    PollScheduler.__init__(self)

    if mergelist is not None:
        warnings.warn("The mergelist parameter of the " + \
            "_emerge.Scheduler constructor is now unused. Use " + \
            "the graph_config parameter instead.",
            DeprecationWarning, stacklevel=2)

    self.settings = settings
    self.target_root = settings["ROOT"]
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites, allow_repo=True)
    # Translate boolean command line options into SlotObject
    # attributes, e.g. "--fetch-all-uri" -> fetch_all_uri.
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

    self._logger = self._emerge_log_class()
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())

    # Holds merges that will wait to be executed when no builds are
    # executing. This is useful for system packages since dependencies
    # on system packages are frequently unspecified.
    self._merge_wait_queue = []
    # Holds merges that have been transfered from the merge_wait_queue to
    # the actual merge queue. They are removed from this list upon
    # completion. Other packages can start building only when this list is
    self._merge_wait_scheduled = []

    # Holds system packages and their deep runtime dependencies. Before
    # being merged, these packages go to merge_wait_queue, to be merged
    # when no other packages are building.
    self._deep_system_deps = set()

    # Holds packages to merge which will satisfy currently unsatisfied
    # deep runtime dependencies of system packages. If this is not empty
    # then no parallel builds will be spawned until it is empty. This
    # minimizes the possibility that a build will fail due to the system
    # being in a fragile state. For example, see bug #259954.
    self._unsatisfied_system_deps = set()

    self._status_display = JobStatusDisplay(
        xterm_titles=('notitles' not in settings.features))
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    self._set_max_jobs(max_jobs)

    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]
    # NOTE(review): the body of this debug branch is elided here.
    if settings.get("PORTAGE_DEBUG", "") == "1":
    # Per-root cloned config instances plus a pool of reusable configs.
    self.pkgsettings = {}
    self._config_pool = {}
    for root in self.trees:
        self._config_pool[root] = []

    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        dblinkEmergeLog=self._dblink_emerge_log,
        fetch=fetch_iface, output=self._task_output,
        register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)

    # Weak values so completed prefetchers can be garbage collected.
    self._prefetchers = weakref.WeakValueDictionary()
    self._running_tasks = set()
    self._completed_tasks = set()

    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    self._init_graph(graph_config)
    merge_count = len([x for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval

    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None

    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    self._choose_pkg_return_early = False

    features = self.settings.features
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            # parallel-fetch without distlocks is unsafe; warn and
            # leave self._parallel_fetch disabled.
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif merge_count > 1:
            self._parallel_fetch = True

    if self._parallel_fetch:
        # clear out existing fetch log if it exists
        # NOTE(review): the enclosing try: line is elided here.
        open(self._fetch_log, 'w')
        except EnvironmentError:

    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
    cpv = portage_match.pop()
    self._running_portage = self._pkg(cpv, "installed",
        self._running_root, installed=True)
def _init_graph(self, graph_config):
    """
    Initialization structures used for dependency calculations
    involving currently installed packages.
    """
    # TODO: Replace the BlockerDB with a depgraph of installed packages
    # that's updated incrementally with each upgrade/uninstall operation
    # This will be useful for making quick and safe decisions with respect
    # to aggressive parallelization discussed in bug #279623.
    self._set_graph_config(graph_config)
    self._blocker_db = {}
    for root in self.trees:
        if graph_config is None:
            fake_vartree = FakeVartree(self.trees[root]["root_config"])
        # NOTE(review): an "else:" line appears to be elided here in
        # this chunk; the second assignment is presumably the else
        # branch -- confirm against the full file.
        fake_vartree = graph_config.trees[root]['vartree']
        self._blocker_db[root] = BlockerDB(fake_vartree)
def _destroy_graph(self):
    """
    Use this to free memory at the beginning of _calc_resume_list().
    After _calc_resume_list(), the _init_graph() method
    must be called in order to re-generate the structures that
    this method destroys.
    """
    self._blocker_db = None
    self._set_graph_config(None)
def _poll(self, timeout=None):
    # Wrap PollScheduler._poll() so that the status display is
    # refreshed at least every _max_display_latency milliseconds while
    # waiting for events.
    # NOTE(review): several control-flow lines (docstring, return and
    # break statements, the else: branches) are elided from this chunk.
    if not self._poll_event_handlers:
    if not self._poll_event_handlers:
        "timeout is None and there are no poll() event handlers")
    previous_count = len(self._poll_event_queue)
    PollScheduler._poll(self, timeout=self._max_display_latency)
    self._status_display.display()
    if previous_count != len(self._poll_event_queue):
    elif timeout <= self._max_display_latency:
        PollScheduler._poll(self, timeout=timeout)
        # The display is updated by _schedule() above, so it would be
        # redundant to update it here when timeout is 0.
        self._status_display.display()
    # Long timeout: poll in _max_display_latency slices so the display
    # stays fresh, tracking the remaining time with a wall clock.
    remaining_timeout = timeout
    start_time = time.time()
    previous_count = len(self._poll_event_queue)
    PollScheduler._poll(self,
        timeout=min(self._max_display_latency, remaining_timeout))
    self._status_display.display()
    if previous_count != len(self._poll_event_queue):
    elapsed_time = time.time() - start_time
    # The system clock has changed such that start_time
    # is now in the future, so just assume that the
    # timeout has already elapsed.
    remaining_timeout = timeout - 1000 * elapsed_time
    if remaining_timeout <= 0:
def _set_max_jobs(self, max_jobs):
    # Record the --jobs limit and propagate it to the build job queue
    # (max_jobs may be True, meaning unlimited).
    self._max_jobs = max_jobs
    self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @returns: True if background mode is enabled, False otherwise.
    """
    # NOTE(review): a few lines (msg construction, the background
    # reset for interactive packages, and the return statement) are
    # elided from this chunk.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts \
        or "--quiet-build" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            pkg_str += " for " + pkg.root
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            # Interactive packages need the tty, so force serial jobs.
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)
            writemsg_level(">>> In order to temporarily mask " + \
                "interactive updates, you may\n" + \
                ">>> specify --accept-properties=-interactive\n",
                level=logging.INFO, noiselevel=-1)
    self._status_display.quiet = \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
def _get_interactive_tasks(self):
    # Collect packages scheduled for merge that declare the
    # "interactive" property in their metadata.
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
        # NOTE(review): a "continue" line appears elided here.
        if 'interactive' in task.metadata.properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_graph_config(self, graph_config):
    # Adopt (or clear) the depgraph configuration. With --nodeps or a
    # serial job limit, the graph is discarded to free memory since
    # parallel scheduling decisions are not needed.
    # NOTE(review): return statements and some cache-clearing lines
    # appear elided from this chunk.
    if graph_config is None:
        self._graph_config = None
        self._deep_system_deps.clear()

    self._graph_config = graph_config
    self._digraph = graph_config.graph
    self._mergelist = graph_config.mergelist

    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # we don't have dependency information, so the digraph is
        # of no use to us.
        graph_config.graph = None
        graph_config.pkg_cache.clear()
        self._deep_system_deps.clear()

    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
    self._implicit_libc_deps()
    if '--debug' in self.myopts:
        writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
        self._digraph.debug_print()
        writemsg("\n", noiselevel=-1)
def _find_system_deps(self):
    """
    Find system packages and their deep runtime dependencies. Before being
    merged, these packages go to merge_wait_queue, to be merged when no
    other packages are building.
    NOTE: This can only find deep system deps if the system set has been
    added to the graph and traversed deeply (the depgraph "complete"
    parameter will do this, triggered by emerge --complete-graph option).
    """
    deep_system_deps = self._deep_system_deps
    deep_system_deps.clear()
    deep_system_deps.update(
        _find_deep_system_runtime_deps(self._digraph))
    # Only packages that will actually be merged are relevant here.
    deep_system_deps.difference_update([pkg for pkg in \
        deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    # NOTE(review): in the full file this is a fixed-point loop; the
    # enclosing "while True:"/"break" lines and one condition of the
    # root-node test appear elided from this chunk.
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    if not removed_nodes:
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    """
    # NOTE(review): the docstring is cut mid-sentence and the cpv_map
    # initialization plus continue/else lines appear elided from this
    # chunk -- confirm against the full file.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        # Force later merges of the same cpv to wait on earlier ones.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
def _implicit_libc_deps(self):
    """
    Create implicit dependencies on libc, in order to ensure that libc
    is installed as early as possible (see bug #303567). If the merge
    list contains both a new-style virtual and an old-style PROVIDE
    virtual, the new-style virtual is used.
    """
    # NOTE(review): many lines are elided from this chunk, including
    # the norm_libc_pkgs/virt_libc_pkgs initialization, several
    # branches of the classification logic, continue statements, and
    # the error operands of the AssertionError -- confirm against the
    # full file before editing.
    implicit_libc_roots = set([self._running_root.root])
    libc_set = InternalPackageSet([LIBC_PACKAGE_ATOM])
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.root in implicit_libc_roots and \
            pkg.operation == 'merge':
            if libc_set.findAtomForPackage(pkg):
                if pkg.category == 'virtual':
                    raise AssertionError(
                        "found 2 libc matches: %s and %s" % \

    # Prefer new-style virtuals over old-style PROVIDE virtuals.
    libc_pkg_map = norm_libc_pkgs.copy()
    libc_pkg_map.update(virt_libc_pkgs)

    # Only add a dep when the version changes.
    for libc_pkg in list(libc_pkg_map.values()):
        if libc_pkg.root_config.trees['vartree'].dbapi.cpv_exists(
            del libc_pkg_map[pkg.root]

    libc_pkgs = set(libc_pkg_map.values())
    earlier_libc_pkgs = set()

    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.root in implicit_libc_roots and \
            pkg.operation == 'merge':
            earlier_libc_pkgs.add(pkg)
        # Make non-libc merges wait on any earlier libc merge for the
        # same root.
        my_libc = libc_pkg_map.get(pkg.root)
        if my_libc is not None and \
            my_libc in earlier_libc_pkgs:
            self._digraph.add(my_libc, pkg,
                priority=DepPriority(buildtime=True))
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): a guard line (presumably "if pargs:") appears
        # elided here -- as written this raises IndexError for an
        # empty pargs.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.add(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    # NOTE(review): the docstring says a callable is returned, but the
    # visible code returns the result directly; wrapper-closure lines
    # may be elided from this chunk -- confirm against the full file.
    return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    # Build dblink instances for installed packages that block new_pkg,
    # skipping trivially-replaced slot/cpv matches.
    # NOTE(review): several lines (return statements, blocker_dblinks
    # initialization, continue statements) are elided from this chunk.
    if self._opts_ignore_blockers.intersection(self.myopts):

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    blocker_db = self._blocker_db[new_pkg.root]

    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Construct the Package instance corresponding to a dblink."""
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_cfg = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, tree_type, root_cfg,
        installed=(tree_type == "installed"))
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    # Route elog messages from a dblink through _task_output so they
    # end up in the build log when running in the background.
    # NOTE(review): the StringIO setup, the msgs loop and the out
    # cleanup appear elided from this chunk.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    out_str = out.getvalue()
    self._task_output(out_str, log_path=log_path)
def _dblink_emerge_log(self, msg):
    # Forward emerge-log messages from a dblink to our logger.
    self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    # Display a merge message, suppressing sub-WARN output on the
    # console while in background mode; the message still goes to the
    # build log via _task_output.
    # NOTE(review): a couple of lines appear elided from this chunk.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if not (background and level < logging.WARN):
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
    self._task_output(msg, log_path=log_path)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    # NOTE(review): the else: line before the EbuildPhase branch and
    # the start()/wait() calls appear elided from this chunk.
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    if phase in ('die_hooks', 'success_hooks'):
        ebuild_phase = MiscFunctionsProcess(background=background,
            commands=[phase], phase=phase,
            scheduler=scheduler, settings=settings)
    ebuild_phase = EbuildPhase(background=background,
        phase=phase, scheduler=scheduler,

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must be done before parallel
    """
    # NOTE(review): the docstring is cut mid-sentence and several
    # control-flow lines (return statements, continue/break, the
    # writemsg_level call opening) are elided from this chunk.
    if '--fetchonly' in self.myopts:

    digest = '--digest' in self.myopts
    # Check whether any per-root config enables FEATURES=digest.
    for pkgsettings in self.pkgsettings.values():
        if pkgsettings.mycpv is not None:
            # ensure that we are using global features
            # settings rather than those from package.env
        if 'digest' in pkgsettings.features:

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
        pkgsettings = self.pkgsettings[x.root]
        if pkgsettings.mycpv is not None:
            # ensure that we are using global features
            # settings rather than those from package.env
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not digestgen(mysettings=pkgsettings, myportdb=portdb):
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
def _env_sanity_check(self):
    """
    Verify a sane environment before trying to build anything from source.
    """
    # NOTE(review): early-return lines, the loop continuations and the
    # final eerror/return lines appear elided from this chunk.
    for x in self._mergelist:
        if isinstance(x, Package) and not x.built:

    for settings in self.pkgsettings.values():
        for var in ("ARCH", ):
            value = settings.get(var)
            if value and value.strip():
            msg = _("%(var)s is not set... "
                "Are you missing the '%(configroot)setc/make.profile' symlink? "
                "Is the symlink correct? "
                "Is your portage tree complete?") % \
                {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}

            out = portage.output.EOutput()
            for line in textwrap.wrap(msg, 70):
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # NOTE(review): return statements, quiet_settings initialization
    # and some continue lines are elided from this chunk.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:

    shown_verifying_msg = False
    # Clone a quiet config per root so digestcheck output is suppressed.
    for myroot, pkgsettings in self.pkgsettings.items():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
        if x.operation == "uninstall":
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")
        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        quiet_config["O"] = os.path.dirname(ebuild_path)
        if not digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    # Queue background fetchers for everything on the merge list so
    # downloads overlap with builds.
    # NOTE(review): the early return and continue lines are elided
    # from this chunk.
    if not self._parallel_fetch:

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            # mergelist can contain solved Blocker instances
            if not isinstance(pkg, Package) or pkg.operation == "uninstall":
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the "prefetcher = None" initialization, the else
    # branch and the return statement appear elided from this chunk.
    if not isinstance(pkg, Package):

    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @returns: True if a restart is scheduled, False otherwise.
    """
    # NOTE(review): the return True/False lines are elided from this
    # chunk.
    if self._opts_no_restart.intersection(self.myopts):

    mergelist = self._mergelist

    for i, pkg in enumerate(mergelist):
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # NOTE(review): the return True/False lines are elided from this
    # chunk.
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage is None:
        # A different version, a live (9999) ebuild, or a git-inherited
        # ebuild implies the running portage is being replaced.
        elif pkg.cpv != self._running_portage.cpv or \
            '9999' in pkg.cpv or \
            'git' in pkg.inherited:
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    # NOTE(review): early return lines and parts of the resume-option
    # handling appear elided from this chunk.
    if self._opts_no_restart.intersection(self.myopts):

    if not self._is_restart_necessary(pkg):

    if pkg == self._mergelist[-1]:

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \

    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    # Don't trust sys.argv[0] here because eselect-python may modify it.
    emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
    mynewargv = [emerge_binary, "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.items():
        if myopt not in bad_resume_opts:
            mynewargv.append(myopt)
        elif isinstance(myarg, list):
            # arguments like --exclude that use 'append' action
            mynewargv.append("%s=%s" % (myopt, x))
            mynewargv.append("%s=%s" % (myopt, myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
def _run_pkg_pretend(self):
    # Run the pkg_pretend phase for every EAPI>=4 package on the merge
    # list, inside a throwaway PORTAGE_TMPDIR.
    # NOTE(review): many lines are elided from this chunk (continue
    # statements, the tree selection, try/finally scaffolding, fetcher
    # start/wait, and the return value handling).
    for x in self._mergelist:
        if not isinstance(x, Package):
        if x.operation == "uninstall":
        # pkg_pretend was introduced in EAPI 4.
        if x.metadata["EAPI"] in ("0", "1", "2", "3"):
        if "pretend" not in x.metadata.defined_phases:

        out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
        portage.util.writemsg_stdout(out_str, noiselevel=-1)

        root_config = x.root_config
        settings = self.pkgsettings[root_config.root]
        # Use a private tmpdir so a failed pretend phase leaves no
        # residue in the real PORTAGE_TMPDIR.
        tmpdir = tempfile.mkdtemp()
        tmpdir_orig = settings["PORTAGE_TMPDIR"]
        settings["PORTAGE_TMPDIR"] = tmpdir

        # Binary packages: fetch if remote, then unpack build-info so
        # the environment matches the package that was actually built.
        bintree = root_config.trees["bintree"].dbapi.bintree
        if bintree.isremote(x.cpv):
            fetcher = BinpkgPrefetcher(background=self._background,
                logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=x, scheduler=self._sched_iface)

        tbz2_file = bintree.getname(x.cpv)
        infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
        portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
        ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
        settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
        settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
        if self._build_opts.buildpkgonly:
            settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
        settings.configdict["pkg"]["MERGE_TYPE"] = "source"

        portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
            "pretend", settings=settings,
            db=self.trees[settings["ROOT"]][tree].dbapi)
        prepare_build_dirs(root_config.root, settings, cleanup=0)

        vardb = root_config.trees['vartree'].dbapi
        settings["REPLACING_VERSIONS"] = " ".join(
            set(portage.versions.cpv_getversion(match) \
            for match in vardb.match(x.slot_atom) + \
            vardb.match('='+x.cpv)))
        pretend_phase = EbuildPhase(background=self._background,
            phase="pretend", scheduler=self._sched_iface,

        pretend_phase.start()
        ret = pretend_phase.wait()
        portage.elog.elog_process(x.cpv, settings)
        shutil.rmtree(tmpdir)
        settings["PORTAGE_TMPDIR"] = tmpdir_orig
# Body fragment of the Scheduler's top-level merge() entry point (its `def`
# line is elided from this view -- TODO confirm against the full file).
# Drives the whole run: per-root setup, pre-merge sanity checks, the merge
# loop (with --keep-going retries), and the final failure summary.
1067 if "--resume" in self.myopts:
# Announce the resume both on the terminal and in the emerge log.
1069 portage.writemsg_stdout(
1070 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
1071 self._logger.log(" *** Resuming merge...")
1073 self._save_resume_list()
1076 self._background = self._background_mode()
# NOTE(review): the matching `try:` for this handler is elided from view.
1077 except self._unknown_internal_error:
# Per-root setup: validate PORTAGE_TMPDIR and snapshot settings.
1080 for root in self.trees:
1081 root_config = self.trees[root]["root_config"]
1083 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
1084 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
1085 # for ensuring sane $PWD (bug #239560) and storing elog messages.
1086 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
1087 if not tmpdir or not os.path.isdir(tmpdir):
1088 msg = "The directory specified in your " + \
1089 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
1090 "does not exist. Please create this " + \
1091 "directory or correct your PORTAGE_TMPDIR setting."
1092 msg = textwrap.wrap(msg, 70)
1093 out = portage.output.EOutput()
# In background mode, propagate PORTAGE_BACKGROUND into the (locked)
# per-root settings so spawned phases inherit it.
1098 if self._background:
1099 root_config.settings.unlock()
1100 root_config.settings["PORTAGE_BACKGROUND"] = "1"
1101 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
1102 root_config.settings.lock()
# Baseline config per root; _allocate_config() clones from this.
1104 self.pkgsettings[root] = portage.config(
1105 clone=root_config.settings)
1107 keep_going = "--keep-going" in self.myopts
1108 fetchonly = self._build_opts.fetchonly
1109 mtimedb = self._mtimedb
1110 failed_pkgs = self._failed_pkgs
# Pre-merge checks: digests, environment sanity, manifests, pkg_pretend.
# Each returns os.EX_OK on success; non-OK aborts (elided returns).
1112 rval = self._generate_digests()
1113 if rval != os.EX_OK:
1116 rval = self._env_sanity_check()
1117 if rval != os.EX_OK:
1120 # TODO: Immediately recalculate deps here if --keep-going
1121 # is enabled and corrupt manifests are detected.
1122 rval = self._check_manifests()
1123 if rval != os.EX_OK and not keep_going:
1126 rval = self._run_pkg_pretend()
1127 if rval != os.EX_OK:
# Main merge loop; with --keep-going a failure triggers a resume-list
# recalculation and another pass (loop header elided from view).
1131 rval = self._merge()
1132 if rval == os.EX_OK or fetchonly or not keep_going:
1134 if "resume" not in mtimedb:
1136 mergelist = self._mtimedb["resume"].get("mergelist")
# Drop the packages that failed from the resume mergelist before retrying.
1143 for failed_pkg in failed_pkgs:
1144 mergelist.remove(list(failed_pkg.pkg))
1146 self._failed_pkgs_all.extend(failed_pkgs)
1152 if not self._calc_resume_list():
1155 clear_caches(self.trees)
1156 if not self._mergelist:
1159 self._save_resume_list()
1160 self._pkg_count.curval = 0
1161 self._pkg_count.maxval = len([x for x in self._mergelist \
1162 if isinstance(x, Package) and x.operation == "merge"])
1163 self._status_display.maxval = self._pkg_count.maxval
1165 self._logger.log(" *** Finished. Cleaning up...")
1168 self._failed_pkgs_all.extend(failed_pkgs)
# ---- failure reporting ----
1171 printer = portage.output.EOutput()
1172 background = self._background
1173 failure_log_shown = False
1174 if background and len(self._failed_pkgs_all) == 1:
1175 # If only one package failed then just show it's
1176 # whole log for easy viewing.
1177 failed_pkg = self._failed_pkgs_all[-1]
1178 build_dir = failed_pkg.build_dir
1181 log_paths = [failed_pkg.build_log]
1183 log_path = self._locate_failure_log(failed_pkg)
1184 if log_path is not None:
# Logs are read as raw bytes; PORT_LOGDIR logs may be gzip-compressed.
1186 log_file = open(_unicode_encode(log_path,
1187 encoding=_encodings['fs'], errors='strict'), mode='rb')
1191 if log_path.endswith('.gz'):
1192 log_file = gzip.GzipFile(filename='',
1193 mode='rb', fileobj=log_file)
1195 if log_file is not None:
1197 for line in log_file:
1198 writemsg_level(line, noiselevel=-1)
# A truncated/corrupt gzip stream surfaces as zlib.error mid-iteration.
1199 except zlib.error as e:
1200 writemsg_level("%s\n" % (e,), level=logging.ERROR,
1204 failure_log_shown = True
1206 # Dump mod_echo output now since it tends to flood the terminal.
1207 # This allows us to avoid having more important output, generated
1208 # later, from being swept away by the mod_echo output.
1209 mod_echo_output = _flush_elog_mod_echo()
1211 if background and not failure_log_shown and \
1212 self._failed_pkgs_all and \
1213 self._failed_pkgs_die_msgs and \
1214 not mod_echo_output:
# Replay the ERROR-level elog messages collected by _elog_listener().
1216 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
1218 if mysettings["ROOT"] != "/":
1219 root_msg = " merged to %s" % mysettings["ROOT"]
1221 printer.einfo("Error messages for package %s%s:" % \
1222 (colorize("INFORM", key), root_msg))
1224 for phase in portage.const.EBUILD_PHASES:
1225 if phase not in logentries:
1227 for msgtype, msgcontent in logentries[phase]:
# NOTE(review): `basestring` is python2-only; presumably a compat alias is
# defined elsewhere in this file for python3 -- confirm.
1228 if isinstance(msgcontent, basestring):
1229 msgcontent = [msgcontent]
1230 for line in msgcontent:
1231 printer.eerror(line.strip("\n"))
1233 if self._post_mod_echo_msgs:
1234 for msg in self._post_mod_echo_msgs:
# Summary list of every package that failed during this run.
1237 if len(self._failed_pkgs_all) > 1 or \
1238 (self._failed_pkgs_all and keep_going):
1239 if len(self._failed_pkgs_all) > 1:
1240 msg = "The following %d packages have " % \
1241 len(self._failed_pkgs_all) + \
1242 "failed to build or install:"
1244 msg = "The following package has " + \
1245 "failed to build or install:"
1248 for line in textwrap.wrap(msg, 72):
1249 printer.eerror(line)
1251 for failed_pkg in self._failed_pkgs_all:
1252 # Use _unicode_decode() to force unicode format string so
1253 # that Package.__unicode__() is called in python2.
1254 msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
1255 log_path = self._locate_failure_log(failed_pkg)
1256 if log_path is not None:
1257 msg += ", Log file:"
1259 if log_path is not None:
1260 printer.eerror(" '%s'" % colorize('INFORM', log_path))
1263 if self._failed_pkgs_all:
# elog listener registered while merging (see portage.elog.add_listener in
# the merge-runner body): collects the ERROR-level entries emitted for a
# package so they can be replayed in the end-of-run failure summary.
# NOTE(review): a guard line (likely "if errors:") is elided between the
# filter and the append -- confirm before assuming unconditional appends.
1267 def _elog_listener(self, mysettings, key, logentries, fulltext):
1268 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
1270 self._failed_pkgs_die_msgs.append(
1271 (mysettings, key, errors))
# Locate the build log for a failed package, candidate paths being derived
# from the recorded build_log. Interior logic (existence checks, choosing
# among candidates, the return) is elided from this view.
1273 def _locate_failure_log(self, failed_pkg):
1275 build_dir = failed_pkg.build_dir
1278 log_paths = [failed_pkg.build_log]
1280 for log_path in log_paths:
# os.stat() raises OSError for a vanished log; presumably guarded by
# elided code -- TODO confirm.
1285 log_size = os.stat(log_path).st_size
# Populate the scheduler's work queue from the merge list: Package
# instances are queued for merging; Blocker handling (body elided from
# this view) is separate since blockers are not merge tasks.
1296 def _add_packages(self):
1297 pkg_queue = self._pkg_queue
1298 for pkg in self._mergelist:
1299 if isinstance(pkg, Package):
1300 pkg_queue.append(pkg)
1301 elif isinstance(pkg, Blocker):
# Start listener attached to deferred system-package merges (see
# _build_exit): records which of the package's direct runtime deps are
# still waiting to merge.
1304 def _system_merge_started(self, merge):
1306 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1308 graph = self._digraph
1311 pkg = merge.merge.pkg
1313 # Skip this if $ROOT != / since it shouldn't matter if there
1314 # are unsatisfied system runtime deps in this case.
1318 completed_tasks = self._completed_tasks
1319 unsatisfied = self._unsatisfied_system_deps
# Edge filter passed to the digraph: keep only unsatisfied (post-)runtime
# dependency edges. (Its return statements are elided from this view.)
1321 def ignore_non_runtime_or_satisfied(priority):
1323 Ignore non-runtime and satisfied runtime priorities.
1325 if isinstance(priority, DepPriority) and \
1326 not priority.satisfied and \
1327 (priority.runtime or priority.runtime_post):
1331 # When checking for unsatisfied runtime deps, only check
1332 # direct deps since indirect deps are checked when the
1333 # corresponding parent is merged.
1334 for child in graph.child_nodes(pkg,
1335 ignore_priority=ignore_non_runtime_or_satisfied):
1336 if not isinstance(child, Package) or \
1337 child.operation == 'uninstall':
# A dep that is scheduled to merge but not yet complete is "unsatisfied".
1341 if child.operation == 'merge' and \
1342 child not in completed_tasks:
1343 unsatisfied.add(child)
1345 def _merge_wait_exit_handler(self, task):
1346 self._merge_wait_scheduled.remove(task)
1347 self._merge_exit(task)
# Exit callback for a PackageMerge task: run the common exit handling,
# return the task's config to the pool, and update the status display.
# (Original tail lines after the merge-count update are elided from view.)
1349 def _merge_exit(self, merge):
1350 self._do_merge_exit(merge)
1351 self._deallocate_config(merge.merge.settings)
# Only count live (non-installed-source) successful merges toward the
# progress counter.
1352 if merge.returncode == os.EX_OK and \
1353 not merge.merge.pkg.installed:
1354 self._status_display.curval += 1
1355 self._status_display.merges = len(self._task_queues.merge)
# Common bookkeeping after a merge task finishes: record failure details,
# or on success mark the task (and any replaced package's uninstall task)
# complete and trim the on-disk resume list.
1358 def _do_merge_exit(self, merge):
1359 pkg = merge.merge.pkg
1360 self._running_tasks.remove(pkg)
1361 if merge.returncode != os.EX_OK:
# On failure, capture build dir/log from the task's settings for the
# end-of-run report, then bail (early return elided from view).
1362 settings = merge.merge.settings
1363 build_dir = settings.get("PORTAGE_BUILDDIR")
1364 build_log = settings.get("PORTAGE_LOG_FILE")
1366 self._failed_pkgs.append(self._failed_pkg(
1367 build_dir=build_dir, build_log=build_log,
1369 returncode=merge.returncode))
1370 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
1372 self._status_display.failed = len(self._failed_pkgs)
1375 self._task_complete(pkg)
1376 pkg_to_replace = merge.merge.pkg_to_replace
1377 if pkg_to_replace is not None:
1378 # When a package is replaced, mark it's uninstall
1379 # task complete (if any).
# NOTE(review): the assignment target for this hash-key tuple is on an
# elided line (presumably "uninst_hash_key =") -- confirm.
1381 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
1382 self._task_complete(uninst_hash_key)
1387 self._restart_if_necessary(pkg)
1389 # Call mtimedb.commit() after each merge so that
1390 # --resume still works after being interrupted
1391 # by reboot, sigkill or similar.
1392 mtimedb = self._mtimedb
1393 mtimedb["resume"]["mergelist"].remove(list(pkg))
1394 if not mtimedb["resume"]["mergelist"]:
1395 del mtimedb["resume"]
# Exit callback for a build task: on success hand the result to a
# PackageMerge (deferring system packages until no builds run); on
# failure record the details for the end-of-run report.
1398 def _build_exit(self, build):
1399 if build.returncode == os.EX_OK:
1401 merge = PackageMerge(merge=build)
1402 if not build.build_opts.buildpkgonly and \
1403 build.pkg in self._deep_system_deps:
1404 # Since dependencies on system packages are frequently
1405 # unspecified, merge them only when no builds are executing.
1406 self._merge_wait_queue.append(merge)
1407 merge.addStartListener(self._system_merge_started)
# Non-system packages merge immediately via the merge queue.
1409 merge.addExitListener(self._merge_exit)
1410 self._task_queues.merge.add(merge)
1411 self._status_display.merges = len(self._task_queues.merge)
# Failure path (the `else`/guard lines are elided from this view).
1413 self._running_tasks.remove(build.pkg)
1414 settings = build.settings
1415 build_dir = settings.get("PORTAGE_BUILDDIR")
1416 build_log = settings.get("PORTAGE_LOG_FILE")
1418 self._failed_pkgs.append(self._failed_pkg(
1419 build_dir=build_dir, build_log=build_log,
1421 returncode=build.returncode))
1422 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
1424 self._status_display.failed = len(self._failed_pkgs)
1425 self._deallocate_config(build.settings)
1427 self._status_display.running = self._jobs
1430 def _extract_exit(self, build):
1431 self._build_exit(build)
1433 def _task_complete(self, pkg):
1434 self._completed_tasks.add(pkg)
1435 self._unsatisfied_system_deps.discard(pkg)
1436 self._choose_pkg_return_early = False
# Body fragment of the _merge() runner (its `def` line is elided from this
# view -- TODO confirm): sets up queues/listeners, runs the main loop
# (elided), then cleans up and derives the return code from the last
# failure, if any.
1440 self._add_prefetchers()
1441 self._add_packages()
1442 pkg_queue = self._pkg_queue
1443 failed_pkgs = self._failed_pkgs
# Quiet lock-wait messages while running in the background.
1444 portage.locks._quiet = self._background
1445 portage.elog.add_listener(self._elog_listener)
# Cleanup mirrors the setup above; the listener must be removed so later
# elog events don't keep appending to _failed_pkgs_die_msgs.
1451 self._main_loop_cleanup()
1452 portage.locks._quiet = False
1453 portage.elog.remove_listener(self._elog_listener)
1455 rval = failed_pkgs[-1].returncode
1459 def _main_loop_cleanup(self):
1460 del self._pkg_queue[:]
1461 self._completed_tasks.clear()
1462 self._deep_system_deps.clear()
1463 self._unsatisfied_system_deps.clear()
1464 self._choose_pkg_return_early = False
1465 self._status_display.reset()
1466 self._digraph = None
1467 self._task_queues.fetch.clear()
# Pick the next package from the queue whose dependencies are satisfied.
# Returns a Package, or (via elided lines) None when nothing can start yet.
1469 def _choose_pkg(self):
1471 Choose a task that has all it's dependencies satisfied.
# Skip the (potentially expensive) search until a job exits.
1474 if self._choose_pkg_return_early:
# With --nodeps and parallel jobs there is no digraph; just serve FIFO.
1477 if self._digraph is None:
1478 if self._is_work_scheduled() and \
1479 not ("--nodeps" in self.myopts and \
1480 (self._max_jobs is True or self._max_jobs > 1)):
1481 self._choose_pkg_return_early = True
1483 return self._pkg_queue.pop(0)
# Nothing running: the head of the queue is always safe to start.
1485 if not self._is_work_scheduled():
1486 return self._pkg_queue.pop(0)
1488 self._prune_digraph()
1492 # Prefer uninstall operations when available.
1493 graph = self._digraph
1494 for pkg in self._pkg_queue:
1495 if pkg.operation == 'uninstall' and \
1496 not graph.child_nodes(pkg):
# Otherwise pick the first queued package not blocked by a scheduled merge.
1500 if chosen_pkg is None:
1501 later = set(self._pkg_queue)
1502 for pkg in self._pkg_queue:
1504 if not self._dependent_on_scheduled_merges(pkg, later):
1508 if chosen_pkg is not None:
1509 self._pkg_queue.remove(chosen_pkg)
1511 if chosen_pkg is None:
1512 # There's no point in searching for a package to
1513 # choose until at least one of the existing jobs
1515 self._choose_pkg_return_early = True
# Depth-first walk over pkg's dependency subgraph looking for any node
# that is still scheduled to merge (the positive-detection branch and the
# final return are elided from this view).
1519 def _dependent_on_scheduled_merges(self, pkg, later):
1521 Traverse the subgraph of the given packages deep dependencies
1522 to see if it contains any scheduled merges.
1523 @param pkg: a package to check dependencies for
1525 @param later: packages for which dependence should be ignored
1526 since they will be merged later than pkg anyway and therefore
1527 delaying the merge of pkg will not result in a more optimal
1531 @returns: True if the package is dependent, False otherwise.
1534 graph = self._digraph
1535 completed_tasks = self._completed_tasks
# Iterative DFS; direct_deps is frozen so the uninstall special case below
# can distinguish direct from indirect dependencies.
1538 traversed_nodes = set([pkg])
1539 direct_deps = graph.child_nodes(pkg)
1540 node_stack = direct_deps
1541 direct_deps = frozenset(direct_deps)
# (Loop header over node_stack is elided from this view.)
1543 node = node_stack.pop()
1544 if node in traversed_nodes:
1546 traversed_nodes.add(node)
# A node counts as "scheduled" unless it is already installed/no-merge,
# an indirect uninstall, completed, or in the later set (tail elided).
1547 if not ((node.installed and node.operation == "nomerge") or \
1548 (node.operation == "uninstall" and \
1549 node not in direct_deps) or \
1550 node in completed_tasks or \
1555 # Don't traverse children of uninstall nodes since
1556 # those aren't dependencies in the usual sense.
1557 if node.operation != "uninstall":
1558 node_stack.extend(graph.child_nodes(node))
1562 def _allocate_config(self, root):
1564 Allocate a unique config instance for a task in order
1565 to prevent interference between parallel tasks.
1567 if self._config_pool[root]:
1568 temp_settings = self._config_pool[root].pop()
1570 temp_settings = portage.config(clone=self.pkgsettings[root])
1571 # Since config.setcpv() isn't guaranteed to call config.reset() due to
1572 # performance reasons, call it here to make sure all settings from the
1573 # previous package get flushed out (such as PORTAGE_LOG_FILE).
1574 temp_settings.reload()
1575 temp_settings.reset()
1576 return temp_settings
1578 def _deallocate_config(self, settings):
1579 self._config_pool[settings["ROOT"]].append(settings)
# Drive the scheduler's poll loop: cap jobs at 1 when a portage restart is
# pending or a no-background option is set, then pump events until no more
# work is scheduled (several loop-body lines are elided from this view).
1581 def _main_loop(self):
1583 # Only allow 1 job max if a restart is scheduled
1584 # due to portage update.
1585 if self._is_restart_scheduled() or \
1586 self._opts_no_background.intersection(self.myopts):
1587 self._set_max_jobs(1)
1589 while self._schedule():
1590 if self._poll_event_handlers:
# Drain remaining events after scheduling stops.
1595 if not self._is_work_scheduled():
1597 if self._poll_event_handlers:
1600 def _keep_scheduling(self):
1601 return bool(self._pkg_queue and \
1602 not (self._failed_pkgs and not self._build_opts.fetchonly))
1604 def _is_work_scheduled(self):
1605 return bool(self._running_tasks)
# One scheduling pass: release deferred system-package merges when no jobs
# run, start new tasks, refresh the display, and decide whether the poll
# loop should continue (interior loop/guard lines are elided from view).
1607 def _schedule_tasks(self):
1611 # When the number of jobs drops to zero, process all waiting merges.
1612 if not self._jobs and self._merge_wait_queue:
1613 for task in self._merge_wait_queue:
1614 task.addExitListener(self._merge_wait_exit_handler)
1615 self._task_queues.merge.add(task)
1616 self._status_display.merges = len(self._task_queues.merge)
1617 self._merge_wait_scheduled.extend(self._merge_wait_queue)
1618 del self._merge_wait_queue[:]
1620 self._schedule_tasks_imp()
1621 self._status_display.display()
1624 for q in self._task_queues.values():
1628 # Cancel prefetchers if they're the only reason
1629 # the main poll loop is still running.
1630 if self._failed_pkgs and not self._build_opts.fetchonly and \
1631 not self._is_work_scheduled() and \
1632 self._task_queues.fetch:
1633 self._task_queues.fetch.clear()
# Repeat the pass while state keeps changing (loop structure elided).
1636 if not (state_change or \
1637 (not self._jobs and self._merge_wait_queue)):
1640 return self._keep_scheduling()
# Throttle job starts when a load limit is configured: delay grows with
# the current job count, capped at _job_delay_max (the True/False returns
# are elided from this view).
1642 def _job_delay(self):
1645 @returns: True if job scheduling should be delayed, False otherwise.
1648 if self._jobs and self._max_load is not None:
1650 current_time = time.time()
# delay = factor * jobs ** exp, clamped to the configured maximum.
1652 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1653 if delay > self._job_delay_max:
1654 delay = self._job_delay_max
1655 if (current_time - self._previous_job_start_time) < delay:
# Try to start at most one new task per call; the surrounding loop in
# _schedule_tasks() keeps calling while state changes. Several guard and
# branch lines are elided from this view.
1660 def _schedule_tasks_imp(self):
1663 @returns: True if state changed, False otherwise.
1670 if not self._keep_scheduling():
1671 return bool(state_change)
# Bail when a search would be pointless: early-return flag set, deferred
# merges pending, unsatisfied system deps with jobs running, or job/load
# limits reached.
1673 if self._choose_pkg_return_early or \
1674 self._merge_wait_scheduled or \
1675 (self._jobs and self._unsatisfied_system_deps) or \
1676 not self._can_add_job() or \
1678 return bool(state_change)
1680 pkg = self._choose_pkg()
1682 return bool(state_change)
1686 if not pkg.installed:
1687 self._pkg_count.curval += 1
1689 task = self._task(pkg)
1690 self._running_tasks.add(pkg)
# Installed-source packages go straight to the merge queue (front), while
# ebuild/binpkg tasks enter the jobs queue with the matching exit handler.
1693 merge = PackageMerge(merge=task)
1694 merge.addExitListener(self._merge_exit)
1695 self._task_queues.merge.addFront(merge)
1699 self._previous_job_start_time = time.time()
1700 self._status_display.running = self._jobs
1701 task.addExitListener(self._extract_exit)
1702 self._task_queues.jobs.add(task)
1706 self._previous_job_start_time = time.time()
1707 self._status_display.running = self._jobs
1708 task.addExitListener(self._build_exit)
1709 self._task_queues.jobs.add(task)
1711 return bool(state_change)
# Build the MergeListItem task for a package, resolving which installed
# package (same slot) it will replace. The return statement is elided
# from this view.
1713 def _task(self, pkg):
1715 pkg_to_replace = None
1716 if pkg.operation != "uninstall":
1717 vardb = pkg.root_config.trees["vartree"].dbapi
1718 previous_cpv = vardb.match(pkg.slot_atom)
# pop() takes the single slot match; presumably guarded by an elided
# emptiness check -- TODO confirm.
1720 previous_cpv = previous_cpv.pop()
1721 pkg_to_replace = self._pkg(previous_cpv,
1722 "installed", pkg.root_config, installed=True)
# The task gets its own config via _allocate_config() plus callbacks for
# blockers, status messages, and world-file updates.
1724 task = MergeListItem(args_set=self._args_set,
1725 background=self._background, binpkg_opts=self._binpkg_opts,
1726 build_opts=self._build_opts,
1727 config_pool=self._ConfigPool(pkg.root,
1728 self._allocate_config, self._deallocate_config),
1729 emerge_opts=self.myopts,
1730 find_blockers=self._find_blockers(pkg), logger=self._logger,
1731 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
1732 pkg_to_replace=pkg_to_replace,
1733 prefetcher=self._prefetchers.get(pkg),
1734 scheduler=self._sched_iface,
1735 settings=self._allocate_config(pkg.root),
1736 statusMessage=self._status_msg,
1737 world_atom=self._world_atom)
# Emit a one/two-line status message for a failed package, e.g.
# "Failed to emerge foo ... for /root, Log file: '...'".
# @param action: verb such as "install" or "emerge"
# @param preposition: "to" or "for", joining the message to pkg.root
1741 def _failed_pkg_msg(self, failed_pkg, action, preposition):
1742 pkg = failed_pkg.pkg
1743 msg = "%s to %s %s" % \
1744 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
# The root suffix is presumably guarded by an elided ROOT != "/" check
# -- TODO confirm.
1746 msg += " %s %s" % (preposition, pkg.root)
1748 log_path = self._locate_failure_log(failed_pkg)
1749 if log_path is not None:
1750 msg += ", Log file:"
1751 self._status_msg(msg)
# The log path goes on its own status line.
1753 if log_path is not None:
1754 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1756 def _status_msg(self, msg):
1758 Display a brief status message (no newlines) in the status display.
1759 This is called by tasks to provide feedback to the user. This
1760 delegates the responsibility of generating \r and \n control characters,
1761 to guarantee that lines are created or erased when necessary and
1765 @param msg: a brief status message (no newlines allowed)
# In the foreground, start on a fresh line so the message isn't appended
# to an in-progress status line.
1767 if not self._background:
1768 writemsg_level("\n")
1769 self._status_display.displayMessage(msg)
# Persist the current merge list into the mtimedb "resume" entry so an
# interrupted run can be continued with --resume.
1771 def _save_resume_list(self):
1773 Do this before verifying the ebuild Manifests since it might
1774 be possible for the user to use --resume --skipfirst get past
1775 a non-essential package with a broken digest.
1777 mtimedb = self._mtimedb
1779 mtimedb["resume"] = {}
1780 # Stored as a dict starting with portage-2.1.6_rc1, and supported
1781 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
1782 # a list type for options.
1783 mtimedb["resume"]["myopts"] = self.myopts.copy()
1785 # Convert Atom instances to plain str.
1786 mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
# Only actual merge operations are resumable; blockers/nomerge are skipped.
1787 mtimedb["resume"]["mergelist"] = [list(x) \
1788 for x in self._mergelist \
1789 if isinstance(x, Package) and x.operation == "merge"]
# Rebuild the dependency graph from the saved resume list, reporting and
# recording any packages that had to be dropped. Many interior lines
# (try:, continue/return statements, loop headers) are elided from view.
1793 def _calc_resume_list(self):
1795 Use the current resume list to calculate a new one,
1796 dropping any packages with unsatisfied deps.
1798 @returns: True if successful, False otherwise.
1800 print(colorize("GOOD", "*** Resuming merge..."))
1802 # free some memory before creating
1803 # the resume depgraph
1804 self._destroy_graph()
1806 myparams = create_depgraph_params(self.myopts, None)
# (The enclosing try: for this resume_depgraph call is elided.)
1810 success, mydepgraph, dropped_tasks = resume_depgraph(
1811 self.settings, self.trees, self._mtimedb, self.myopts,
1812 myparams, self._spinner)
1813 except depgraph.UnsatisfiedResumeDep as exc:
1814 # rename variable to avoid python-3.0 error:
1815 # SyntaxError: can not delete variable 'e' referenced in nested
# NOTE(review): `e` is bound from `exc` on an elided line -- confirm.
1818 mydepgraph = e.depgraph
1819 dropped_tasks = set()
# Deferred message closure: replayed after mod_echo output is flushed.
1822 def unsatisfied_resume_dep_msg():
1823 mydepgraph.display_problems()
1824 out = portage.output.EOutput()
1825 out.eerror("One or more packages are either masked or " + \
1826 "have missing dependencies:")
1829 show_parents = set()
# (Loop header over the unsatisfied deps is elided.)
1831 if dep.parent in show_parents:
1833 show_parents.add(dep.parent)
1834 if dep.atom is None:
1835 out.eerror(indent + "Masked package:")
1836 out.eerror(2 * indent + str(dep.parent))
1839 out.eerror(indent + str(dep.atom) + " pulled in by:")
1840 out.eerror(2 * indent + str(dep.parent))
1842 msg = "The resume list contains packages " + \
1843 "that are either masked or have " + \
1844 "unsatisfied dependencies. " + \
1845 "Please restart/continue " + \
1846 "the operation manually, or use --skipfirst " + \
1847 "to skip the first package in the list and " + \
1848 "any other packages that may be " + \
1849 "masked or have missing dependencies."
1850 for line in textwrap.wrap(msg, 72):
1852 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# On success, optionally redisplay the merge list (--ask/--tree/--verbose).
1855 if success and self._show_list():
1856 mylist = mydepgraph.altlist()
1858 if "--tree" in self.myopts:
1860 mydepgraph.display(mylist, favorites=self._favorites)
1863 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
1865 mydepgraph.display_problems()
1866 self._init_graph(mydepgraph.schedulerGraph())
# Record each dropped merge as a failed package so the summary mentions it.
1869 for task in dropped_tasks:
1870 if not (isinstance(task, Package) and task.operation == "merge"):
# NOTE(review): `pkg` used below is presumably bound from `task` on an
# elided line -- confirm.
1873 msg = "emerge --keep-going:" + \
1876 msg += " for %s" % (pkg.root,)
1877 msg += " dropped due to unsatisfied dependency."
1878 for line in textwrap.wrap(msg, msg_width):
1879 eerror(line, phase="other", key=pkg.cpv)
1880 settings = self.pkgsettings[pkg.root]
1881 # Ensure that log collection from $T is disabled inside
1882 # elog_process(), since any logs that might exist are
1884 settings.pop("T", None)
1885 portage.elog.elog_process(pkg.cpv, settings)
1886 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Whether the merge list should be (re)displayed to the user: yes for
# --ask/--tree/--verbose unless --quiet (the return statements are elided
# from this view).
1890 def _show_list(self):
1891 myopts = self.myopts
1892 if "--quiet" not in myopts and \
1893 ("--ask" in myopts or "--tree" in myopts or \
1894 "--verbose" in myopts):
# Update the world ("selected") set for a merged or unmerged package.
# Early-return guards skip non-target roots, packages not requested on
# the command line, and option modes that must not touch world (several
# return/unlock lines are elided from this view).
1898 def _world_atom(self, pkg):
1900 Add or remove the package to the world file, but only if
1901 it's supposed to be added or removed. Otherwise, do nothing.
1904 if set(("--buildpkgonly", "--fetchonly",
1906 "--oneshot", "--onlydeps",
1907 "--pretend")).intersection(self.myopts):
1910 if pkg.root != self.target_root:
1913 args_set = self._args_set
1914 if not args_set.findAtomForPackage(pkg):
1917 logger = self._logger
1918 pkg_count = self._pkg_count
1919 root_config = pkg.root_config
1920 world_set = root_config.sets["selected"]
# The set implementation may or may not support locking/loading; feature
# detection via hasattr keeps this working for any set backend.
1921 world_locked = False
1922 if hasattr(world_set, "lock"):
1927 if hasattr(world_set, "load"):
1928 world_set.load() # maybe it's changed on disk
1930 if pkg.operation == "uninstall":
1931 if hasattr(world_set, "cleanPackage"):
1932 world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
1934 if hasattr(world_set, "remove"):
# Drop stale set references from world when unmerging.
1935 for s in pkg.root_config.setconfig.active:
1936 world_set.remove(SETPREFIX+s)
# Merge path: compute the preferred world atom and record it.
1938 atom = create_world_atom(pkg, args_set, root_config)
1940 if hasattr(world_set, "add"):
1941 self._status_msg(('Recording %s in "world" ' + \
1942 'favorites file...') % atom)
1943 logger.log(" === (%s of %s) Updating world file (%s)" % \
1944 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
# Fallback warning when the set backend cannot record additions.
1947 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
1948 (atom,), level=logging.WARN, noiselevel=-1)
# Fetch-or-create a Package instance, preferring a cached one from the
# dependency graph (several guard lines, including the digraph-hit
# return, are elided from this view).
1953 def _pkg(self, cpv, type_name, root_config, installed=False):
1955 Get a package instance from the cache, or create a new
1956 one if necessary. Raises KeyError from aux_get if it
1957 fails for some reason (package does not exist or is
# Installed packages are "nomerge" operations in the graph key.
1962 operation = "nomerge"
1964 if self._digraph is not None:
1965 # Reuse existing instance when available.
1966 pkg = self._digraph.get(
1967 (type_name, root_config.root, cpv, operation))
# Cache miss: build a fresh Package from the matching tree's dbapi.
1971 tree_type = depgraph.pkg_tree_map[type_name]
1972 db = root_config.trees[tree_type].dbapi
1973 db_keys = list(self.trees[root_config.root][
1974 tree_type].dbapi._aux_cache_keys)
# NOTE(review): on python3 zip() returns an iterator; presumably Package
# accepts any iterable of (key, value) pairs -- confirm.
1975 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
1976 return Package(built=(type_name != 'ebuild'),
1977 cpv=cpv, metadata=metadata,
1978 root_config=root_config, installed=installed)