1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 from __future__ import print_function
12 from itertools import chain
15 from portage import os
16 from portage import digraph
17 from portage.dep import Atom
18 from portage.output import bold, blue, colorize, create_color_func, darkblue, \
19 darkgreen, green, nc_len, red, teal, turquoise, yellow
20 bad = create_color_func("BAD")
21 from portage.sets import SETPREFIX
22 from portage.sets.base import InternalPackageSet
23 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
24 from portage.util import writemsg_level
26 from _emerge.AtomArg import AtomArg
27 from _emerge.Blocker import Blocker
28 from _emerge.BlockerCache import BlockerCache
29 from _emerge.BlockerDepPriority import BlockerDepPriority
30 from _emerge.changelog import calc_changelog
31 from _emerge.countdown import countdown
32 from _emerge.create_world_atom import create_world_atom
33 from _emerge.Dependency import Dependency
34 from _emerge.DependencyArg import DependencyArg
35 from _emerge.DepPriority import DepPriority
36 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
37 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
38 from _emerge.FakeVartree import FakeVartree
39 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
40 from _emerge.format_size import format_size
41 from _emerge.is_valid_package_atom import is_valid_package_atom
42 from _emerge.Package import Package
43 from _emerge.PackageArg import PackageArg
44 from _emerge.PackageCounters import PackageCounters
45 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
46 from _emerge.RepoDisplay import RepoDisplay
47 from _emerge.RootConfig import RootConfig
48 from _emerge.search import search
49 from _emerge.SetArg import SetArg
50 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
51 from _emerge.UnmergeDepPriority import UnmergeDepPriority
52 from _emerge.visible import visible
54 if sys.hexversion >= 0x3000000:
class _frozen_depgraph_config(object):
	"""Depgraph state that is computed once per invocation and shared
	across (backtracking) calculations: settings, tree/dbapi handles,
	and option-derived constants.

	NOTE(review): this view of the file elides interior lines (e.g. the
	loop that binds ``myroot`` and the initializers for ``self.trees``,
	``self.roots``, ``self.pkgsettings`` and ``self._pkg_cache``) --
	confirm details against the full file.
	"""

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		# ROOT of the system that packages will be merged into.
		self.target_root = settings["ROOT"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		# Configuration of the root emerge itself runs on ("/").
		self._running_root = trees["/"]["root_config"]
		# Options under which a restart of emerge is never required.
		self._opts_no_restart = frozenset(["--buildpkgonly",
			"--fetchonly", "--fetch-all-uri", "--pretend"])
		# Keep the unmodified tree dicts; self.trees below substitutes
		# a FakeVartree for the real vartree.
		self._trees_orig = trees
		# All Package instances
		# NOTE(review): ``myroot`` has no visible binding here;
		# presumably the following statements run inside a loop over
		# the roots in ``trees`` that is elided from this view.
		self.trees[myroot] = {}
		# Create a RootConfig instance that references
		# the FakeVartree instead of the real one.
		self.roots[myroot] = RootConfig(
			trees[myroot]["vartree"].settings,
			trees[myroot]["root_config"].setconfig)
		for tree in ("porttree", "bintree"):
			self.trees[myroot][tree] = trees[myroot][tree]
		# Model planned vdb state without touching the real vartree.
		self.trees[myroot]["vartree"] = \
			FakeVartree(trees[myroot]["root_config"],
				pkg_cache=self._pkg_cache)
		self.pkgsettings[myroot] = portage.config(
			clone=self.trees[myroot]["vartree"].settings)
		# Package sets that the graph must always account for.
		self._required_set_names = set(["system", "world"])
class _dynamic_depgraph_config(object):
	"""Depgraph state that is rebuilt for every calculation (including
	each backtracking retry): the digraph itself, per-root fake dbapi
	instances modeling post-merge vdb state, caches, and blocker/slot
	conflict bookkeeping.

	NOTE(review): this view elides interior lines (the trailing
	``runtime_pkg_mask`` parameter of __init__, the bindings of ``pkg``,
	``graph_tree``, ``filtered_tree`` and ``dbs``, and several branch
	bodies) -- confirm details against the full file.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking,
		# Copy so that later mutation of myparams does not leak to the caller.
		self.myparams = myparams.copy()
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# Contains the args created by select_files.
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# contains all sets added to the graph
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		# Maps Package -> set of (parent, atom) pairs that pulled it in.
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		if runtime_pkg_mask is None:
			runtime_pkg_mask = {}
		# Deep-copy the per-package reason dicts so this calculation can
		# mutate them independently of the caller's mask.
		runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
			runtime_pkg_mask.items())
		self._runtime_pkg_mask = runtime_pkg_mask
		self._need_restart = False

		for myroot in depgraph._frozen_config.trees:
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# Skip the preload when deps won't be examined anyway.
			preload_installed_pkgs = \
				"--nodeps" not in depgraph._frozen_config.myopts and \
				"--buildpkgonly" not in depgraph._frozen_config.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				# NOTE(review): ``pkg`` has no visible binding; presumably
				# bound by an elided iteration over installed packages.
				depgraph._spinner_update()
				# This triggers metadata updates via FakeVartree.
				vardb.aux_get(pkg.cpv, [])
				fakedb.cpv_inject(pkg)
			# Now that the vardb state is cached in our FakeVartree,
			# we won't be needing the real vartree cache for awhile.
			# To make some room on the heap, clear the vardbapi
			depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._clear_cache()
			self.mydbapi[myroot] = fakedb
			# NOTE(review): ``graph_tree`` / ``filtered_tree`` bindings are
			# elided from this view.
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# that have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]

			portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
			bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			# NOTE(review): ``dbs`` has no visible initialization here.
			if "--usepkgonly" not in depgraph._frozen_config.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in depgraph._frozen_config.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in depgraph._frozen_config.myopts:
				depgraph._frozen_config._trees_orig[myroot
					]["bintree"].populate(
					"--getbinpkg" in depgraph._frozen_config.myopts,
					"--getbinpkgonly" in depgraph._frozen_config.myopts)
class depgraph(object):
	"""Dependency graph calculator: resolves arguments into a digraph of
	Package nodes, tracking blockers, slot conflicts and backtracking."""

	# Reuse RootConfig's mapping of package type names to tree names.
	pkg_tree_map = RootConfig.pkg_tree_map

	# Metadata keys traversed when expanding a package's dependencies.
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, runtime_pkg_mask=None, allow_backtracking=False):
		# Build the shared frozen config only when the caller did not
		# supply a pre-built one.
		if frozen_config is None:
			# NOTE(review): this call is truncated in this view; its
			# remaining arguments are elided -- confirm against the full file.
			frozen_config = _frozen_depgraph_config(settings, trees,
		self._frozen_config = frozen_config
		# Per-calculation state (graph, caches, conflict bookkeeping).
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, runtime_pkg_mask)

		# Selection policy hooks: prefer the highest available version.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
271 def _spinner_update(self):
272 if self._frozen_config.spinner:
273 self._frozen_config.spinner.update()
	def _show_missed_update(self):
		"""Report updates skipped during this calculation (entries in
		_runtime_pkg_mask), grouped by the type of mask reason.

		NOTE(review): the initialization of ``missed_updates`` and the
		early-return branch bodies are elided from this view.
		"""

		# Stay silent under --quiet unless --debug re-enables output.
		if '--quiet' in self._frozen_config.myopts and \
			'--debug' not in self._frozen_config.myopts:

		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
			# Exclude installed here since we only
			# want to show available updates.
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
			for mask_type, parent_atoms in mask_reasons.items():
				missed_updates[k] = (pkg, mask_type, parent_atoms)

		if not missed_updates:

		# Group the surviving entries by mask type for display.
		missed_update_types = {}
		for pkg, mask_type, parent_atoms in missed_updates.values():
			missed_update_types.setdefault(mask_type,
				[]).append((pkg, parent_atoms))

		self._show_missed_update_slot_conflicts(
			missed_update_types.get("slot conflict"))

		self._show_missed_update_unsatisfied_dep(
			missed_update_types.get("missing dependency"))
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		"""Write a stderr report for updates that were skipped because one
		of their dependencies could not be satisfied.

		@param missed_updates: sequence of (pkg, parent_atoms) pairs, or a
			false value when there is nothing to report
		"""

		if not missed_updates:

		write = sys.stderr.write

		for pkg, parent_atoms in missed_updates:

			write("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n")

			write(str(pkg.slot_atom))
			write(" for %s" % (pkg.root,))

			# Detail every unsatisfied parent dependency for this package.
			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
	def _show_missed_update_slot_conflicts(self, missed_updates):
		"""Write a stderr report for updates that were skipped because of
		a slot conflict.

		NOTE(review): the initialization of ``msg`` is elided from this view.
		"""

		if not missed_updates:

		msg.append("\n!!! One or more updates have been skipped due to " + \
			"a dependency conflict:\n\n")

		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			msg.append(" for %s" % (pkg.root,))

			# Show every parent/atom pair involved in the conflict.
			for parent, atom in parent_atoms:
				msg.append(" conflicts with\n")
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))

		sys.stderr.write("".join(msg))
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible

		# NOTE(review): many interior lines (``msg``/``pruned_list``/
		# ``max_parents``/``indent``/``explanations`` bindings and several
		# branch bodies) are elided from this view; nesting is approximate.

		if not self._dynamic_config._slot_collision_info:

		self._show_merge_list()

		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")

		# Max number of parents shown, to avoid flooding the display.
		explanation_columns = 70
		for (slot_atom, root), slot_nodes \
			in self._dynamic_config._slot_collision_info.items():
			msg.append(str(slot_atom))
			msg.append(" for %s" % (root,))

			for node in slot_nodes:
				msg.append(str(node))
				parent_atoms = self._dynamic_config._parent_atoms.get(node)
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
						if parent_atom in self._dynamic_config._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)
					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._dynamic_config._slot_collision_info:
								pruned_list.add(parent_atom)
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
							# Display the specific atom from SetArg or
							msg.append("%s required by %s" % (atom, parent))
						msg.append("(and %d more)\n" % omitted_parents)
					msg.append(" (no parents)\n")

				# Try to explain the conflict (USE-dep cases only).
				explanation = self._slot_conflict_explanation(slot_nodes)
					msg.append(indent + "Explanation:\n\n")
					for line in textwrap.wrap(explanation, explanation_columns):
						msg.append(2*indent + line + "\n")

		sys.stderr.write("".join(msg))

		explanations_for_all = explanations == len(self._dynamic_config._slot_collision_info)

		if explanations_for_all or "--quiet" in self._frozen_config.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		# NOTE(review): the stdlib ``formatter`` module is deprecated
		# (removed in Python 3.10) -- flagging for future migration.
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
			f.add_flowing_data(x)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
			f.add_flowing_data(x)
	def _slot_conflict_explanation(self, slot_nodes):
		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		   installed package with incorrect USE happened to get pulled
		   into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		   USE so it got pulled into the graph, and a new instance also got
		   pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		   and multiple package instances got pulled into the same slot to
		   satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

		# NOTE(review): bindings for ``matched_node``/``matched_atoms`` and
		# several return statements are elided from this view.

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.

		all_conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms

		unmatched_node = None
		# Classify each node as matched or unmatched by the conflict atoms.
		for node in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(node)
				# Normally, there are always parent atoms. If there are
				# none then something unexpected is happening and there's
				# currently no suggestion for this case.
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
					# Suggestions are currently only implemented for cases
					# in which all conflict atoms have USE deps.
				if matched_node is not None:
					# If conflict atoms match multiple nodes
					# then there's no suggestion.
				matched_atoms = conflict_atoms
				if unmatched_node is not None:
					# Neither node is matched by conflict atoms, and
					# there is no suggestion for this case.
				unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.

		if unmatched_node.installed and not matched_node.installed and \
			unmatched_node.cpv == matched_node.cpv:
			# If the conflicting packages are the same version then
			# --newuse should be all that's needed. If they are different
			# versions then there's some other problem.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			# Case 2: the USE dep cannot be satisfied by the new instance.
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			for atom in atoms[1:-1]:
				explanation += ", '%s'" % (atom,)
			explanation += " and '%s'" % (atoms[-1],)
	def _process_slot_conflicts(self):
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		for (slot_atom, root), slot_nodes \
			in self._dynamic_config._slot_collision_info.items():

			# Union of the parent atoms of every package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if parent_atoms is None:
					self._dynamic_config._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						# An atom that matches only some packages in the
						# slot identifies the conflict.
						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags.

		NOTE(review): the return statements of this method are elided
		from this view.
		"""
		if "--newuse" in self._frozen_config.myopts or \
			"--binpkg-respect-use" in self._frozen_config.myopts:
			# Flags added to or removed from IUSE (ignoring forced flags),
			# plus flags whose effective enabled state changed.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
			# Only flags whose effective enabled state changed matter here.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		"""Drain the dependency stacks, adding each Package's deps or each
		pending Dependency to the graph until both stacks are empty.

		NOTE(review): the branch bodies (failure returns) and parts of the
		stack-dispatch logic are elided from this view.
		"""
		dep_stack = self._dynamic_config._dep_stack
		dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
		while dep_stack or dep_disjunctive_stack:
			self._spinner_update()
				dep = dep_stack.pop()
				# Package entries get their deps expanded; other entries
				# are Dependency objects added directly.
				if isinstance(dep, Package):
					if not self._add_pkg_deps(dep,
						allow_unsatisfied=allow_unsatisfied):
				if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
			if dep_disjunctive_stack:
				if not self._pop_disjunction(allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""Resolve a single Dependency: select a package for its atom,
		handle blockers, record unsatisfied deps, and trigger
		backtracking when a required update cannot be satisfied.

		NOTE(review): many interior lines (returns, else branches, the
		``msg`` binding, and parts of the backtracking-loop message) are
		elided from this view; nesting is approximate.
		"""
		debug = "--debug" in self._frozen_config.myopts
		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
		nodeps = "--nodeps" in self._frozen_config.myopts
		empty = "empty" in self._dynamic_config.myparams
		deep = self._dynamic_config.myparams.get("deep", 0)
		# Recurse into this dep when doing an empty/deep calculation or
		# when it is still within the requested depth.
		recurse = empty or deep is True or dep.depth <= deep
		if not buildpkgonly and \
			dep.parent not in self._dynamic_config._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._dynamic_config._blocker_parents.add(blocker, dep.parent)
		if dep.child is None:
			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
				onlydeps=dep.onlydeps)
			# The caller has selected a specific package
			# via self._minimize_packages().
			existing_node = self._dynamic_config._slot_pkg_map[
				dep.root].get(dep_pkg.slot_atom)
			if existing_node is not dep_pkg:
			if dep.priority.optional:
				# This could be an unnecessary build-time dep
				# pulled in by --with-bdeps=y.
			if allow_unsatisfied:
				self._dynamic_config._unsatisfied_deps.append(dep)
			self._dynamic_config._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
			# The parent node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking:
				if dep.parent in self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
							"!!! backtracking loop detected: %s %s\n" % \
							self._dynamic_config._runtime_pkg_mask[
								dep.parent]), noiselevel=-1)
					# Do not backtrack if only USE have to be changed in
					# order to satisfy the dependency.
					dep_pkg, existing_node = \
						self._select_package(dep.root, dep.atom.without_use,
							onlydeps=dep.onlydeps)
						# Mask the parent and restart, so the next run can
						# pick a parent whose deps are satisfiable.
						self._dynamic_config._runtime_pkg_mask.setdefault(
							dep.parent, {})["missing dependency"] = \
							set([(dep.parent, dep.root, dep.atom)])
						self._dynamic_config._need_restart = True
						if "--debug" in self._frozen_config.myopts:
							msg.append("backtracking due to unsatisfied dep:")
							msg.append(" parent: %s" % dep.parent)
							msg.append(" priority: %s" % dep.priority)
							msg.append(" root: %s" % dep.root)
							msg.append(" atom: %s" % dep.atom)
							writemsg_level("".join("%s\n" % l for l in msg),
								noiselevel=-1, level=logging.DEBUG)

		# In some cases, dep_check will return deps that shouldn't
		# be processed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or recurse):
			if dep.root == self._frozen_config.target_root:
					myarg = next(self._iter_atoms_for_pkg(dep_pkg))
				except StopIteration:
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
			self._dynamic_config._ignored_deps.append(dep)

		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		"""Add a Package node (and its parent edges) to the graph,
		detecting slot collisions and scheduling the package's own deps
		for later processing.

		NOTE(review): many interior lines (docstring delimiters, ``try:``
		headers, else branches, returns, the ``msg`` binding) are elided
		from this view; nesting is approximate.
		"""
		myparent = dep.parent
		priority = dep.priority
			priority = DepPriority()
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
			Think --onlydeps, we need to ignore packages in that case.
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge

		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self._dynamic_config.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]

			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString as e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.installed and \
			"empty" not in self._dynamic_config.myparams and \
			vardbapi.match(pkg.slot_atom):
			# Increase the priority of dependencies on packages that
			# are being rebuilt. This optimizes merge order so that
			# dependencies are rebuilt/updated as soon as possible,
			# which is needed especially when emerge is called by
			# revdep-rebuild since dependencies may be affected by ABI
			# breakage that has rendered them useless. Don't adjust
			# priority here when in "empty" mode since all packages
			# are being merged in that case.
			priority.rebuild = True

		existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
		slot_collision = False
			existing_node_matches = pkg.cpv == existing_node.cpv
			if existing_node_matches and \
				pkg != existing_node and \
				dep.atom is not None:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				atom_set = InternalPackageSet(initial_atoms=[dep.atom])
				if not atom_set.findAtomForPackage(existing_node):
					existing_node_matches = False
			if existing_node_matches:
				# The existing node can be reused.
				for parent_atom in arg_atoms:
					parent, atom = parent_atom
					self._dynamic_config.digraph.add(existing_node, parent,
					self._add_parent_atom(existing_node, parent_atom)
				# If a direct circular dependency is not an unsatisfied
				# buildtime dependency then drop it here since otherwise
				# it can skew the merge order calculation in an unwanted
				if existing_node != myparent or \
					(priority.buildtime and not priority.satisfied):
					self._dynamic_config.digraph.addnode(existing_node, myparent,
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(existing_node,
							(dep.parent, dep.atom))

				# A slot conflict has occurred.
				# The existing node should not already be in
				# runtime_pkg_mask, since that would trigger an
				# infinite backtracking loop.
				if self._dynamic_config._allow_backtracking and \
					self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
							"!!! backtracking loop detected: %s %s\n" % \
							self._dynamic_config._runtime_pkg_mask[
								existing_node]), noiselevel=-1)
				elif self._dynamic_config._allow_backtracking and \
					not self._accept_blocker_conflicts():
					self._add_slot_conflict(pkg)
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(pkg, (dep.parent, dep.atom))
						for parent_atom in arg_atoms:
							parent, atom = parent_atom
							self._add_parent_atom(pkg, parent_atom)
					self._process_slot_conflicts()

						self._dynamic_config._parent_atoms.get(pkg, set())
					# Only conflict atoms matter for the backtracking mask.
					parent_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
					if pkg >= existing_node:
						# We only care about the parent atoms
						# when they trigger a downgrade.

					# Mask the existing node and restart the calculation.
					self._dynamic_config._runtime_pkg_mask.setdefault(
						existing_node, {})["slot conflict"] = parent_atoms
					self._dynamic_config._need_restart = True
					if "--debug" in self._frozen_config.myopts:
						msg.append("backtracking due to slot conflict:")
						msg.append(" package: %s" % existing_node)
						msg.append(" slot: %s" % existing_node.slot_atom)
						msg.append(" parents: %s" % \
							[(str(parent), atom) \
							for parent, atom in parent_atoms])
						writemsg_level("".join("%s\n" % l for l in msg),
							noiselevel=-1, level=logging.DEBUG)

				# A slot collision has occurred. Sometimes this coincides
				# with unresolvable blockers, so the slot collision will be
				# shown later if there are no unresolvable blockers.
				self._add_slot_conflict(pkg)
				slot_collision = True

			# Now add this node to the graph so that self.display()
			# can show use flags and --tree portage.output. This node is
			# only being partially added to the graph. It must not be
			# allowed to interfere with the other nodes that have been
			# added. Do not overwrite data for existing nodes in
			# self._dynamic_config.mydbapi since that data will be used for blocker
			# Even though the graph is now invalid, continue to process
			# dependencies so that things like --fetchonly can still
			# function despite collisions.

		elif not previously_added:
			self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
			self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
			self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

		if not pkg.installed:
			# Allow this package to satisfy old-style virtuals in case it
			# doesn't already. Any pre-existing providers will be preferred
				pkgsettings.setinst(pkg.cpv, pkg.metadata)
				# For consistency, also update the global virtuals.
				settings = self._frozen_config.roots[pkg.root].settings
				settings.setinst(pkg.cpv, pkg.metadata)
			except portage.exception.InvalidDependString as e:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

			self._dynamic_config._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self._dynamic_config.digraph.add(pkg, parent, priority=priority)
				self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		    We want to go deeper on a few occasions:
		    Installing package A, we need to make sure package A's deps are met.
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		deep = self._dynamic_config.myparams.get("deep", 0)
		empty = "empty" in self._dynamic_config.myparams
		recurse = empty or deep is True or depth + 1 <= deep
		dep_stack = self._dynamic_config._dep_stack
		if "recurse" not in self._dynamic_config.myparams:
		elif pkg.installed and not recurse:
			# Track ignored deps so they can be revisited when needed.
			dep_stack = self._dynamic_config._ignored_deps

		self._spinner_update()

		if not previously_added:
			dep_stack.append(pkg)
997 def _add_parent_atom(self, pkg, parent_atom):
998 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
999 if parent_atoms is None:
1000 parent_atoms = set()
1001 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1002 parent_atoms.add(parent_atom)
1004 def _add_slot_conflict(self, pkg):
1005 self._dynamic_config._slot_collision_nodes.add(pkg)
1006 slot_key = (pkg.slot_atom, pkg.root)
1007 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1008 if slot_nodes is None:
1010 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1011 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
# NOTE(review): garbled extraction -- original line numbers are fused onto
# each line and many lines are elided (gaps in the embedded numbering), so
# this block is not runnable as-is. Code left byte-identical; comments only.
# Purpose (from visible code): collect DEPEND/RDEPEND/PDEPEND strings for
# pkg, adjust them per options (--buildpkgonly, --with-bdeps, --root-deps),
# and feed each through _add_pkg_dep_string with an appropriate priority.
1014 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1016 mytype = pkg.type_name
1019 metadata = pkg.metadata
1020 myuse = pkg.use.enabled
1022 depth = pkg.depth + 1
1023 removal_action = "remove" in self._dynamic_config.myparams
1026 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1028 edepend[k] = metadata[k]
1030 if not pkg.built and \
1031 "--buildpkgonly" in self._frozen_config.myopts and \
1032 "deep" not in self._dynamic_config.myparams and \
1033 "empty" not in self._dynamic_config.myparams:
1034 edepend["RDEPEND"] = ""
1035 edepend["PDEPEND"] = ""
1036 bdeps_optional = False
1038 if pkg.built and not removal_action:
1039 if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
1040 # Pull in build time deps as requested, but marked them as
1041 # "optional" since they are not strictly required. This allows
1042 # more freedom in the merge order calculation for solving
1043 # circular dependencies. Don't convert to PDEPEND since that
1044 # could make --with-bdeps=y less effective if it is used to
1045 # adjust merge order to prevent built_with_use() calls from
1047 bdeps_optional = True
1049 # built packages do not have build time dependencies.
1050 edepend["DEPEND"] = ""
1052 if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
1053 edepend["DEPEND"] = ""
1059 root_deps = self._frozen_config.myopts.get("--root-deps")
1060 if root_deps is not None:
1061 if root_deps is True:
1063 elif root_deps == "rdeps":
1064 edepend["DEPEND"] = ""
# NOTE(review): the construction of the `deps` list header was elided;
# the visible tuples pair (root, dep string, priority) per dep class.
1067 (bdeps_root, edepend["DEPEND"],
1068 self._priority(buildtime=(not bdeps_optional),
1069 optional=bdeps_optional)),
1070 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
1071 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
1074 debug = "--debug" in self._frozen_config.myopts
1075 strict = mytype != "installed"
# Globally relax dep-check strictness for the duration of this loop;
# restored to True below (line 1136).
1078 portage.dep._dep_check_strict = False
1080 for dep_root, dep_string, dep_priority in deps:
1085 print("Parent: ", jbigkey)
1086 print("Depstring:", dep_string)
1087 print("Priority:", dep_priority)
1091 dep_string = portage.dep.paren_normalize(
1092 portage.dep.use_reduce(
1093 portage.dep.paren_reduce(dep_string),
1094 uselist=pkg.use.enabled))
1096 dep_string = list(self._queue_disjunctive_deps(
1097 pkg, dep_root, dep_priority, dep_string))
1099 except portage.exception.InvalidDependString as e:
1103 show_invalid_depstring_notice(pkg, dep_string, str(e))
1109 dep_string = portage.dep.paren_enclose(dep_string)
1111 if not self._add_pkg_dep_string(
1112 pkg, dep_root, dep_priority, dep_string,
1116 except portage.exception.AmbiguousPackageName as e:
1118 portage.writemsg("\n\n!!! An atom in the dependencies " + \
1119 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1121 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
1122 portage.writemsg("\n", noiselevel=-1)
1123 if mytype == "binary":
1125 "!!! This binary package cannot be installed: '%s'\n" % \
1126 mykey, noiselevel=-1)
1127 elif mytype == "ebuild":
1128 portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
1129 myebuild, mylocation = portdb.findname2(mykey)
1130 portage.writemsg("!!! This ebuild cannot be installed: " + \
1131 "'%s'\n" % myebuild, noiselevel=-1)
1132 portage.writemsg("!!! Please notify the package maintainer " + \
1133 "that atoms must be fully-qualified.\n", noiselevel=-1)
1136 portage.dep._dep_check_strict = True
# NOTE(review): garbled extraction -- line numbers fused in, many lines
# elided. Code left byte-identical; comments only.
# Purpose (from visible code): run _select_atoms over one dep string,
# then add a Dependency edge per chosen (atom, child) pair, and pull any
# indirectly-selected virtual packages into the graph as well.
1139 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1141 depth = pkg.depth + 1
1142 debug = "--debug" in self._frozen_config.myopts
1143 strict = pkg.type_name != "installed"
1147 print("Parent: ", pkg)
1148 print("Depstring:", dep_string)
1149 print("Priority:", dep_priority)
1152 selected_atoms = self._select_atoms(dep_root,
1153 dep_string, myuse=pkg.use.enabled, parent=pkg,
1154 strict=strict, priority=dep_priority)
1155 except portage.exception.InvalidDependString as e:
1156 show_invalid_depstring_notice(pkg, dep_string, str(e))
1163 print("Candidates:", selected_atoms)
1165 root_config = self._frozen_config.roots[dep_root]
1166 vardb = root_config.trees["vartree"].dbapi
1168 for atom, child in self._minimize_children(
1169 pkg, dep_priority, root_config, selected_atoms[pkg]):
1171 mypriority = dep_priority.copy()
# An installed match means the dependency is already satisfied.
1172 if not atom.blocker and vardb.match(atom):
1173 mypriority.satisfied = True
1175 if not self._add_dep(Dependency(atom=atom,
1176 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1177 priority=mypriority, root=dep_root),
1178 allow_unsatisfied=allow_unsatisfied):
1181 selected_atoms.pop(pkg)
1183 # Add selected indirect virtual deps to the graph. This
1184 # takes advantage of circular dependency avoidance that's done
1185 # by dep_zapdeps. We preserve actual parent/child relationships
1186 # here in order to avoid distorting the dependency graph like
1187 # <=portage-2.1.6.x did.
1188 for virt_pkg, atoms in selected_atoms.items():
1190 # Just assume depth + 1 here for now, though it's not entirely
1191 # accurate since multilple levels of indirect virtual deps may
1192 # have been traversed. The _add_pkg call will reset the depth to
1193 # 0 if this package happens to match an argument.
1194 if not self._add_pkg(virt_pkg,
1195 Dependency(atom=Atom('=' + virt_pkg.cpv),
1196 depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
1200 for atom, child in self._minimize_children(
1201 pkg, self._priority(runtime=True), root_config, atoms):
1202 # This is a GLEP 37 virtual, so its deps are all runtime.
1203 mypriority = self._priority(runtime=True)
1204 if not atom.blocker and vardb.match(atom):
1205 mypriority.satisfied = True
1207 if not self._add_dep(Dependency(atom=atom,
1208 blocker=atom.blocker, child=child, depth=virt_pkg.depth,
1209 parent=virt_pkg, priority=mypriority, root=dep_root),
1210 allow_unsatisfied=allow_unsatisfied):
1214 print("Exiting...", pkg)
# NOTE(review): garbled extraction -- line numbers fused in, many lines
# elided (including the docstring delimiters and several loop headers).
# Code left byte-identical; comments only.
# Purpose (from visible code/docstring text): select packages for the
# given atoms and drop redundant selections via a small bipartite
# atom<->package digraph, yielding one (atom, child package) per atom.
1218 def _minimize_children(self, parent, priority, root_config, atoms):
1220 Selects packages to satisfy the given atoms, and minimizes the
1221 number of selected packages. This serves to identify and eliminate
1222 redundant package selections when multiple atoms happen to specify
1232 dep_pkg, existing_node = self._select_package(
1233 root_config.root, atom)
1237 atom_pkg_map[atom] = dep_pkg
# Fast path: fewer than two selections cannot contain redundancy.
1239 if len(atom_pkg_map) < 2:
1240 for item in atom_pkg_map.items():
1246 for atom, pkg in atom_pkg_map.items():
1247 pkg_atom_map.setdefault(pkg, set()).add(atom)
1248 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
1250 for cp, pkgs in cp_pkg_map.items():
1253 for atom in pkg_atom_map[pkg]:
1257 # Use a digraph to identify and eliminate any
1258 # redundant package selections.
1259 atom_pkg_graph = digraph()
1262 for atom in pkg_atom_map[pkg1]:
1264 atom_pkg_graph.add(pkg1, atom)
1265 atom_set = InternalPackageSet(initial_atoms=(atom,))
1269 if atom_set.findAtomForPackage(pkg2):
1270 atom_pkg_graph.add(pkg2, atom)
# A package is redundant only if every atom it satisfies is also
# satisfied by some other selected package.
1273 eliminate_pkg = True
1274 for atom in atom_pkg_graph.parent_nodes(pkg):
1275 if len(atom_pkg_graph.child_nodes(atom)) < 2:
1276 eliminate_pkg = False
1279 atom_pkg_graph.remove(pkg)
1281 for atom in cp_atoms:
1282 child_pkgs = atom_pkg_graph.child_nodes(atom)
1283 yield (atom, child_pkgs[0])
# NOTE(review): garbled extraction -- line numbers fused in, several lines
# elided (docstring delimiters, loop increments, yields). Code left
# byte-identical; comments only.
# Purpose (from visible code/docstring text): walk a parsed dep structure,
# push "||" choices and virtual/* atoms onto the disjunctive stack for
# deferred resolution, and yield the plain (non-disjunctive) deps.
1285 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
1287 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
1288 Yields non-disjunctive deps. Raises InvalidDependString when
1292 while i < len(dep_struct):
1294 if isinstance(x, list):
1295 for y in self._queue_disjunctive_deps(
1296 pkg, dep_root, dep_priority, x):
# The "||" token and its following choice-list are queued as a pair.
1299 self._queue_disjunction(pkg, dep_root, dep_priority,
1300 [ x, dep_struct[ i + 1 ] ] )
1304 x = portage.dep.Atom(x)
1305 except portage.exception.InvalidAtom:
# Installed packages tolerate invalid atoms (historical metadata);
# anything else is a hard error.
1306 if not pkg.installed:
1307 raise portage.exception.InvalidDependString(
1308 "invalid atom: '%s'" % x)
1310 # Note: Eventually this will check for PROPERTIES=virtual
1311 # or whatever other metadata gets implemented for this
1313 if x.cp.startswith('virtual/'):
1314 self._queue_disjunction( pkg, dep_root,
1315 dep_priority, [ str(x) ] )
1320 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1321 self._dynamic_config._dep_disjunctive_stack.append(
1322 (pkg, dep_root, dep_priority, dep_struct))
# NOTE(review): garbled extraction -- line numbers fused in; the docstring
# delimiters and the trailing return statements are elided. Code left
# byte-identical; comments only.
# Purpose (from visible code): pop one queued disjunction and resolve it
# through _add_pkg_dep_string.
1324 def _pop_disjunction(self, allow_unsatisfied):
1326 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
1327 populate self._dynamic_config._dep_stack.
1329 pkg, dep_root, dep_priority, dep_struct = \
1330 self._dynamic_config._dep_disjunctive_stack.pop()
1331 dep_string = portage.dep.paren_enclose(dep_struct)
1332 if not self._add_pkg_dep_string(
1333 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
    """Build a dependency priority object for the current operation.

    Uses UnmergeDepPriority when a "remove" operation is in progress and
    DepPriority otherwise; kwargs are forwarded to the chosen constructor.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # restored: the "else:" line was lost in the garbled extraction
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
# NOTE(review): garbled extraction -- line numbers fused in; docstring
# delimiters, category accumulation and the final return are elided.
# Code left byte-identical; comments only.
# Purpose (from visible code): expand a category-less atom into all
# category-qualified atoms for which any configured db has matches.
1344 def _dep_expand(self, root_config, atom_without_category):
1346 @param root_config: a root config instance
1347 @type root_config: RootConfig
1348 @param atom_without_category: an atom without a category component
1349 @type atom_without_category: String
1351 @returns: a list of atoms containing categories (possibly empty)
1353 null_cp = portage.dep_getkey(insert_category_into_atom(
1354 atom_without_category, "null"))
1355 cat, atom_pn = portage.catsplit(null_cp)
1357 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1359 for db, pkg_type, built, installed, db_keys in dbs:
1360 for cat in db.categories:
1361 if db.cp_list("%s/%s" % (cat, atom_pn)):
1365 for cat in categories:
1366 deps.append(Atom(insert_category_into_atom(
1367 atom_without_category, cat)))
1370 def _have_new_virt(self, root, atom_cp):
1372 for db, pkg_type, built, installed, db_keys in \
1373 self._dynamic_config._filtered_trees[root]["dbs"]:
1374 if db.cp_list(atom_cp):
# NOTE(review): garbled extraction -- line numbers fused in, many lines
# elided (yields, continues, loop exits). Code left byte-identical;
# comments only.
# Purpose (from visible code): yield the argument atoms that match pkg,
# skipping atoms superseded by a new-style virtual or by a visible
# higher-slot package.
1379 def _iter_atoms_for_pkg(self, pkg):
1380 # TODO: add multiple $ROOT support
1381 if pkg.root != self._frozen_config.target_root:
1383 atom_arg_map = self._dynamic_config._atom_arg_map
1384 root_config = self._frozen_config.roots[pkg.root]
1385 for atom in self._dynamic_config._set_atoms.iterAtomsForPackage(pkg):
1386 if atom.cp != pkg.cp and \
1387 self._have_new_virt(pkg.root, atom.cp):
1390 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
1391 visible_pkgs.reverse() # descending order
1393 for visible_pkg in visible_pkgs:
1394 if visible_pkg.cp != atom.cp:
1396 if pkg >= visible_pkg:
1397 # This is descending order, and we're not
1398 # interested in any versions <= pkg given.
1400 if pkg.slot_atom != visible_pkg.slot_atom:
1401 higher_slot = visible_pkg
1403 if higher_slot is not None:
1405 for arg in atom_arg_map[(atom, pkg.root)]:
1406 if isinstance(arg, PackageArg) and \
# NOTE(review): garbled extraction -- line numbers fused into each line and
# many lines elided (gaps in the embedded numbering); this block is not
# runnable as-is. Code left byte-identical; comments only. Restore from
# upstream _emerge/depgraph.py before editing.
# Purpose (from visible code): translate command-line arguments (.tbz2
# files, .ebuild paths, filesystem paths, set names, atoms) into typed
# *Arg objects, optionally expand greedy slots under --update, and hand
# the result to self._resolve().
1411 def select_files(self, myfiles):
1412 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1413 self._dynamic_config._initial_arg_list and call self._resolve to create the
1414 appropriate depgraph and return a favorite list."""
1415 debug = "--debug" in self._frozen_config.myopts
1416 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1417 sets = root_config.sets
1418 getSetAtoms = root_config.setconfig.getSetAtoms
1420 myroot = self._frozen_config.target_root
1421 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1422 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1423 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1424 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1425 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1426 pkgsettings = self._frozen_config.pkgsettings[myroot]
1428 onlydeps = "--onlydeps" in self._frozen_config.myopts
# --- .tbz2 arguments: locate the binary package under PKGDIR ---
1431 ext = os.path.splitext(x)[1]
1433 if not os.path.exists(x):
1435 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1436 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1437 elif os.path.exists(
1438 os.path.join(pkgsettings["PKGDIR"], x)):
1439 x = os.path.join(pkgsettings["PKGDIR"], x)
1441 print("\n\n!!! Binary package '"+str(x)+"' does not exist.")
1442 print("!!! Please ensure the tbz2 exists as specified.\n")
1443 return 0, myfavorites
1444 mytbz2=portage.xpak.tbz2(x)
1445 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1446 if os.path.realpath(x) != \
1447 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1448 print(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n"))
1449 return 0, myfavorites
1451 pkg = self._pkg(mykey, "binary", root_config,
1453 args.append(PackageArg(arg=x, package=pkg,
1454 root_config=root_config))
# --- .ebuild arguments: validate the path lies in a portage tree ---
1455 elif ext==".ebuild":
1456 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1457 pkgdir = os.path.dirname(ebuild_path)
1458 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1459 cp = pkgdir[len(tree_root)+1:]
1460 e = portage.exception.PackageNotFound(
1461 ("%s is not in a valid portage tree " + \
1462 "hierarchy or does not exist") % x)
1463 if not portage.isvalidatom(cp):
1465 cat = portage.catsplit(cp)[0]
1466 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1467 if not portage.isvalidatom("="+mykey):
1469 ebuild_path = portdb.findname(mykey)
1471 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1472 cp, os.path.basename(ebuild_path)):
1473 print(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n"))
1474 return 0, myfavorites
1475 if mykey not in portdb.xmatch(
1476 "match-visible", portage.dep_getkey(mykey)):
1477 print(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use"))
1478 print(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man"))
1479 print(colorize("BAD", "*** page for details."))
1480 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1483 raise portage.exception.PackageNotFound(
1484 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1485 pkg = self._pkg(mykey, "ebuild", root_config,
1487 args.append(PackageArg(arg=x, package=pkg,
1488 root_config=root_config))
# --- absolute paths: defer to a batched owner lookup below ---
1489 elif x.startswith(os.path.sep):
1490 if not x.startswith(myroot):
1491 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1492 " $ROOT.\n") % x, noiselevel=-1)
1494 # Queue these up since it's most efficient to handle
1495 # multiple files in a single iter_owners() call.
1496 lookup_owners.append(x)
# --- set names (system/world and @set syntax) ---
1498 if x in ("system", "world"):
1500 if x.startswith(SETPREFIX):
1501 s = x[len(SETPREFIX):]
1503 raise portage.exception.PackageSetNotFound(s)
1504 if s in self._dynamic_config._sets:
1506 # Recursively expand sets so that containment tests in
1507 # self._get_parent_sets() properly match atoms in nested
1508 # sets (like if world contains system).
1509 expanded_set = InternalPackageSet(
1510 initial_atoms=getSetAtoms(s))
1511 self._dynamic_config._sets[s] = expanded_set
1512 args.append(SetArg(arg=x, set=expanded_set,
1513 root_config=root_config))
# --- plain atoms ---
1515 if not is_valid_package_atom(x):
1516 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1518 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1519 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1521 # Don't expand categories or old-style virtuals here unless
1522 # necessary. Expansion of old-style virtuals here causes at
1523 # least the following problems:
1524 # 1) It's more difficult to determine which set(s) an atom
1525 # came from, if any.
1526 # 2) It takes away freedom from the resolver to choose other
1527 # possible expansions when necessary.
1529 args.append(AtomArg(arg=x, atom=Atom(x),
1530 root_config=root_config))
1532 expanded_atoms = self._dep_expand(root_config, x)
1533 installed_cp_set = set()
1534 for atom in expanded_atoms:
1535 if vardb.cp_list(atom.cp):
1536 installed_cp_set.add(atom.cp)
1538 if len(installed_cp_set) > 1:
1539 non_virtual_cps = set()
1540 for atom_cp in installed_cp_set:
1541 if not atom_cp.startswith("virtual/"):
1542 non_virtual_cps.add(atom_cp)
1543 if len(non_virtual_cps) == 1:
1544 installed_cp_set = non_virtual_cps
1546 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
1547 installed_cp = next(iter(installed_cp_set))
1548 expanded_atoms = [atom for atom in expanded_atoms \
1549 if atom.cp == installed_cp]
1551 if len(expanded_atoms) > 1:
1554 ambiguous_package_name(x, expanded_atoms, root_config,
1555 self._frozen_config.spinner, self._frozen_config.myopts)
1556 return False, myfavorites
1558 atom = expanded_atoms[0]
1560 null_atom = Atom(insert_category_into_atom(x, "null"))
1561 cat, atom_pn = portage.catsplit(null_atom.cp)
1562 virts_p = root_config.settings.get_virts_p().get(atom_pn)
1564 # Allow the depgraph to choose which virtual.
1565 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
1569 args.append(AtomArg(arg=x, atom=atom,
1570 root_config=root_config))
# --- resolve queued path arguments via one iter_owners() pass ---
1574 search_for_multiple = False
1575 if len(lookup_owners) > 1:
1576 search_for_multiple = True
1578 for x in lookup_owners:
1579 if not search_for_multiple and os.path.isdir(x):
1580 search_for_multiple = True
1581 relative_paths.append(x[len(myroot):])
1584 for pkg, relative_path in \
1585 real_vardb._owners.iter_owners(relative_paths):
1586 owners.add(pkg.mycpv)
1587 if not search_for_multiple:
1591 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1592 "by any package.\n") % lookup_owners[0], noiselevel=-1)
1596 slot = vardb.aux_get(cpv, ["SLOT"])[0]
1598 # portage now masks packages with missing slot, but it's
1599 # possible that one was installed by an older version
1600 atom = Atom(portage.cpv_getkey(cpv))
1602 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
1603 args.append(AtomArg(arg=atom, atom=atom,
1604 root_config=root_config))
1606 if "--update" in self._frozen_config.myopts:
1607 # In some cases, the greedy slots behavior can pull in a slot that
1608 # the user would want to uninstall due to it being blocked by a
1609 # newer version in a different slot. Therefore, it's necessary to
1610 # detect and discard any that should be uninstalled. Each time
1611 # that arguments are updated, package selections are repeated in
1612 # order to ensure consistency with the current arguments:
1614 # 1) Initialize args
1615 # 2) Select packages and generate initial greedy atoms
1616 # 3) Update args with greedy atoms
1617 # 4) Select packages and generate greedy atoms again, while
1618 # accounting for any blockers between selected packages
1619 # 5) Update args with revised greedy atoms
1621 self._set_args(args)
1624 greedy_args.append(arg)
1625 if not isinstance(arg, AtomArg):
1627 for atom in self._greedy_slots(arg.root_config, arg.atom):
1629 AtomArg(arg=arg.arg, atom=atom,
1630 root_config=arg.root_config))
1632 self._set_args(greedy_args)
1635 # Revise greedy atoms, accounting for any blockers
1636 # between selected packages.
1637 revised_greedy_args = []
1639 revised_greedy_args.append(arg)
1640 if not isinstance(arg, AtomArg):
1642 for atom in self._greedy_slots(arg.root_config, arg.atom,
1643 blocker_lookahead=True):
1644 revised_greedy_args.append(
1645 AtomArg(arg=arg.arg, atom=atom,
1646 root_config=arg.root_config))
1647 args = revised_greedy_args
1648 del revised_greedy_args
1650 self._set_args(args)
1652 myfavorites = set(myfavorites)
1654 if isinstance(arg, (AtomArg, PackageArg)):
1655 myfavorites.add(arg.atom)
1656 elif isinstance(arg, SetArg):
1657 myfavorites.add(arg.arg)
1658 myfavorites = list(myfavorites)
1661 portage.writemsg("\n", noiselevel=-1)
1662 # Order needs to be preserved since a feature of --nodeps
1663 # is to allow the user to force a specific merge order.
1664 self._dynamic_config._initial_arg_list = args[:]
1666 return self._resolve(myfavorites)
# NOTE(review): garbled extraction -- line numbers fused in, many lines
# elided (try/except headers, continues, loop bodies). Code left
# byte-identical; comments only.
# Purpose (from visible code): add each initial argument's atoms to the
# graph as root nodes (honoring package.provided and virtuals), then run
# _create_graph() to process their dependencies; returns (status,
# favorites).
1668 def _resolve(self, myfavorites):
1669 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1670 call self._creategraph to process theier deps and return
1672 debug = "--debug" in self._frozen_config.myopts
1673 onlydeps = "--onlydeps" in self._frozen_config.myopts
1674 myroot = self._frozen_config.target_root
1675 pkgsettings = self._frozen_config.pkgsettings[myroot]
1676 pprovideddict = pkgsettings.pprovideddict
1677 virtuals = pkgsettings.getvirtuals()
1678 for arg in self._dynamic_config._initial_arg_list:
1679 for atom in arg.set:
1680 self._spinner_update()
1681 dep = Dependency(atom=atom, onlydeps=onlydeps,
1682 root=myroot, parent=arg)
1684 pprovided = pprovideddict.get(atom.cp)
1685 if pprovided and portage.match_from_list(atom, pprovided):
1686 # A provided package has been specified on the command line.
1687 self._dynamic_config._pprovided_args.append((arg, atom))
1689 if isinstance(arg, PackageArg):
1690 if not self._add_pkg(arg.package, dep) or \
1691 not self._create_graph():
1692 if not self._dynamic_config._need_restart:
1693 sys.stderr.write(("\n\n!!! Problem " + \
1694 "resolving dependencies for %s\n") % \
1696 return 0, myfavorites
1699 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1700 (arg, atom), noiselevel=-1)
1701 pkg, existing_node = self._select_package(
1702 myroot, atom, onlydeps=onlydeps)
# No package matched: check whether a virtual expansion is
# satisfied by package.provided before reporting failure.
1704 pprovided_match = False
1705 for virt_choice in virtuals.get(atom.cp, []):
1706 expanded_atom = portage.dep.Atom(
1707 atom.replace(atom.cp,
1708 portage.dep_getkey(virt_choice), 1))
1709 pprovided = pprovideddict.get(expanded_atom.cp)
1711 portage.match_from_list(expanded_atom, pprovided):
1712 # A provided package has been
1713 # specified on the command line.
1714 self._dynamic_config._pprovided_args.append((arg, atom))
1715 pprovided_match = True
1720 if not (isinstance(arg, SetArg) and \
1721 arg.name in ("system", "world")):
1722 self._dynamic_config._unsatisfied_deps_for_display.append(
1723 ((myroot, atom), {}))
1724 return 0, myfavorites
1725 self._dynamic_config._missing_args.append((arg, atom))
1727 if atom.cp != pkg.cp:
1728 # For old-style virtuals, we need to repeat the
1729 # package.provided check against the selected package.
1730 expanded_atom = atom.replace(atom.cp, pkg.cp)
1731 pprovided = pprovideddict.get(pkg.cp)
1733 portage.match_from_list(expanded_atom, pprovided):
1734 # A provided package has been
1735 # specified on the command line.
1736 self._dynamic_config._pprovided_args.append((arg, atom))
1738 if pkg.installed and "selective" not in self._dynamic_config.myparams:
1739 self._dynamic_config._unsatisfied_deps_for_display.append(
1740 ((myroot, atom), {}))
1741 # Previous behavior was to bail out in this case, but
1742 # since the dep is satisfied by the installed package,
1743 # it's more friendly to continue building the graph
1744 # and just show a warning message. Therefore, only bail
1745 # out here if the atom is not from either the system or
1747 if not (isinstance(arg, SetArg) and \
1748 arg.name in ("system", "world")):
1749 return 0, myfavorites
1751 # Add the selected package to the graph as soon as possible
1752 # so that later dep_check() calls can use it as feedback
1753 # for making more consistent atom selections.
1754 if not self._add_pkg(pkg, dep):
1755 if self._dynamic_config._need_restart:
1757 elif isinstance(arg, SetArg):
1758 sys.stderr.write(("\n\n!!! Problem resolving " + \
1759 "dependencies for %s from %s\n") % \
1762 sys.stderr.write(("\n\n!!! Problem resolving " + \
1763 "dependencies for %s\n") % atom)
1764 return 0, myfavorites
1766 except portage.exception.MissingSignature as e:
1767 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1768 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1769 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1770 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1771 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1772 return 0, myfavorites
1773 except portage.exception.InvalidSignature as e:
1774 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1775 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1776 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1777 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1778 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1779 return 0, myfavorites
1780 except SystemExit as e:
1781 raise # Needed else can't exit
1782 except Exception as e:
1783 print("\n\n!!! Problem in '%s' dependencies." % atom, file=sys.stderr)
1784 print("!!!", str(e), getattr(e, "__module__", None), file=sys.stderr)
1787 # Now that the root packages have been added to the graph,
1788 # process the dependencies.
1789 if not self._create_graph():
1790 return 0, myfavorites
# --usepkgonly sanity pass: every merge node must be a binary package.
1793 if "--usepkgonly" in self._frozen_config.myopts:
1794 for xs in self._dynamic_config.digraph.all_nodes():
1795 if not isinstance(xs, Package):
1797 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1801 print("Missing binary for:",xs[2])
1805 except self._unknown_internal_error:
1806 return False, myfavorites
1808 # We're true here unless we are missing binaries.
1809 return (not missing,myfavorites)
# NOTE(review): garbled extraction -- line numbers fused in, several lines
# elided (docstring delimiters, loop headers, adds/removals). Code left
# byte-identical; comments only.
1811 def _set_args(self, args):
1813 Create the "args" package set from atoms and packages given as
1814 arguments. This method can be called multiple times if necessary.
1815 The package selection cache is automatically invalidated, since
1816 arguments influence package selections.
1818 args_set = self._dynamic_config._sets["args"]
1821 if not isinstance(arg, (AtomArg, PackageArg)):
1824 if atom in args_set:
# Rebuild the combined atom set and the atom -> argument reverse map
# from scratch on every call.
1828 self._dynamic_config._set_atoms.clear()
1829 self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
1830 atom_arg_map = self._dynamic_config._atom_arg_map
1831 atom_arg_map.clear()
1833 for atom in arg.set:
1834 atom_key = (atom, arg.root_config.root)
1835 refs = atom_arg_map.get(atom_key)
1838 atom_arg_map[atom_key] = refs
1842 # Invalidate the package selection cache, since
1843 # arguments influence package selections.
1844 self._dynamic_config._highest_pkg_cache.clear()
1845 for trees in self._dynamic_config._filtered_trees.values():
1846 trees["porttree"].dbapi._clear_cache()
# NOTE(review): garbled extraction -- line numbers fused in, lines elided
# (docstring delimiters, early returns, loop headers, continues/breaks).
# Code left byte-identical; comments only.
1848 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
1850 Return a list of slot atoms corresponding to installed slots that
1851 differ from the slot of the highest visible match. When
1852 blocker_lookahead is True, slot atoms that would trigger a blocker
1853 conflict are automatically discarded, potentially allowing automatic
1854 uninstallation of older slots when appropriate.
1856 highest_pkg, in_graph = self._select_package(root_config.root, atom)
1857 if highest_pkg is None:
1859 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of installed packages sharing highest_pkg's cp.
1861 for cpv in vardb.match(atom):
1862 # don't mix new virtuals with old virtuals
1863 if portage.cpv_getkey(cpv) == highest_pkg.cp:
1864 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
1866 slots.add(highest_pkg.metadata["SLOT"])
1870 slots.remove(highest_pkg.metadata["SLOT"])
1873 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1874 pkg, in_graph = self._select_package(root_config.root, slot_atom)
1875 if pkg is not None and \
1876 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
1877 greedy_pkgs.append(pkg)
1880 if not blocker_lookahead:
1881 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: gather each candidate's blocker atoms so mutually
# blocking slots can be discarded below.
1884 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1885 for pkg in greedy_pkgs + [highest_pkg]:
1886 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1888 selected_atoms = self._select_atoms(
1889 pkg.root, dep_str, pkg.use.enabled,
1890 parent=pkg, strict=True)
1891 except portage.exception.InvalidDependString:
1894 for atoms in selected_atoms.values():
1895 blocker_atoms.extend(x for x in atoms if x.blocker)
1896 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
1898 if highest_pkg not in blockers:
1901 # filter packages with invalid deps
1902 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
1904 # filter packages that conflict with highest_pkg
1905 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
1906 (blockers[highest_pkg].findAtomForPackage(pkg) or \
1907 blockers[pkg].findAtomForPackage(highest_pkg))]
1912 # If two packages conflict, discard the lower version.
1913 discard_pkgs = set()
1914 greedy_pkgs.sort(reverse=True)
1915 for i in range(len(greedy_pkgs) - 1):
1916 pkg1 = greedy_pkgs[i]
1917 if pkg1 in discard_pkgs:
1919 for j in range(i + 1, len(greedy_pkgs)):
1920 pkg2 = greedy_pkgs[j]
1921 if pkg2 in discard_pkgs:
1923 if blockers[pkg1].findAtomForPackage(pkg2) or \
1924 blockers[pkg2].findAtomForPackage(pkg1):
1926 discard_pkgs.add(pkg2)
1928 return [pkg.slot_atom for pkg in greedy_pkgs \
1929 if pkg not in discard_pkgs]
1931 def _select_atoms_from_graph(self, *pargs, **kwargs):
1933 Prefer atoms matching packages that have already been
1934 added to the graph or those that are installed and have
1935 not been scheduled for replacement.
1937 kwargs["trees"] = self._dynamic_config._graph_trees
1938 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): garbled extraction -- line numbers fused in, lines elided
# (try/finally structure, branch headers, continues). Code left
# byte-identical; comments only.
# Purpose (from visible code): run portage.dep_check over depstring,
# temporarily attaching parent/atom_graph/priority hints to the trees,
# and distribute the chosen atoms per selected package via atom_graph.
1940 def _select_atoms_highest_available(self, root, depstring,
1941 myuse=None, parent=None, strict=True, trees=None, priority=None):
1942 """This will raise InvalidDependString if necessary. If trees is
1943 None then self._dynamic_config._filtered_trees is used."""
1944 pkgsettings = self._frozen_config.pkgsettings[root]
1946 trees = self._dynamic_config._filtered_trees
1947 atom_graph = digraph()
# Hints are attached before dep_check and popped again below (1961-1965);
# the try/finally that presumably guarded this was elided.
1950 if parent is not None:
1951 trees[root]["parent"] = parent
1952 trees[root]["atom_graph"] = atom_graph
1953 if priority is not None:
1954 trees[root]["priority"] = priority
1956 portage.dep._dep_check_strict = False
1957 mycheck = portage.dep_check(depstring, None,
1958 pkgsettings, myuse=myuse,
1959 myroot=root, trees=trees)
1961 if parent is not None:
1962 trees[root].pop("parent")
1963 trees[root].pop("atom_graph")
1964 if priority is not None:
1965 trees[root].pop("priority")
1966 portage.dep._dep_check_strict = True
1968 raise portage.exception.InvalidDependString(mycheck[1])
1970 selected_atoms = mycheck[1]
1972 chosen_atoms = frozenset(mycheck[1])
1973 selected_atoms = {parent : []}
1974 for node in atom_graph:
1975 if isinstance(node, Atom):
1980 pkg, virt_atom = node
1981 if virt_atom not in chosen_atoms:
1983 if not portage.match_from_list(virt_atom, [pkg]):
1984 # Typically this means that the atom
1985 # specifies USE deps that are unsatisfied
1986 # by the selected package. The caller will
1987 # record this as an unsatisfied dependency
1991 selected_atoms[pkg] = [atom for atom in \
1992 atom_graph.child_nodes(node) if atom in chosen_atoms]
1994 return selected_atoms
1996 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
# Print a human-readable explanation of why no package satisfies
# "atom": masked candidates, USE-flag mismatches, missing IUSE, and
# the chain of parent nodes that pulled the atom into the graph.
1997 atom_set = InternalPackageSet(initial_atoms=(atom,))
1998 xinfo = '"%s"' % atom
2001 # Discard null/ from failed cpv_expand category expansion.
2002 xinfo = xinfo.replace("null/", "")
2003 masked_packages = []
2005 masked_pkg_instances = set()
2006 missing_licenses = []
2007 have_eapi_mask = False
2008 pkgsettings = self._frozen_config.pkgsettings[root]
2009 implicit_iuse = pkgsettings._get_implicit_iuse()
2010 root_config = self._frozen_config.roots[root]
2011 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2012 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Scan every candidate db (ebuild/binary/installed) for packages that
# would match the atom if they were not masked or USE-blocked.
2013 for db, pkg_type, built, installed, db_keys in dbs:
2017 if hasattr(db, "xmatch"):
2018 cpv_list = db.xmatch("match-all", atom.without_use)
2020 cpv_list = db.match(atom.without_use)
2023 for cpv in cpv_list:
2024 metadata, mreasons = get_mask_info(root_config, cpv,
2025 pkgsettings, db, pkg_type, built, installed, db_keys)
2026 if metadata is not None:
2027 pkg = self._pkg(cpv, pkg_type, root_config,
2028 installed=installed)
2029 if pkg.cp != atom.cp:
2030 # A cpv can be returned from dbapi.match() as an
2031 # old-style virtual match even in cases when the
2032 # package does not actually PROVIDE the virtual.
2033 # Filter out any such false matches here.
2034 if not atom_set.findAtomForPackage(pkg):
2036 if pkg in self._dynamic_config._runtime_pkg_mask:
2037 backtrack_reasons = \
2038 self._dynamic_config._runtime_pkg_mask[pkg]
2039 mreasons.append('backtracking: %s' % \
2040 ', '.join(sorted(backtrack_reasons)))
2042 masked_pkg_instances.add(pkg)
2044 missing_use.append(pkg)
2047 masked_packages.append(
2048 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE-related failures: flags missing from IUSE versus flags
# that merely need to be enabled/disabled.
2050 missing_use_reasons = []
2051 missing_iuse_reasons = []
2052 for pkg in missing_use:
2053 use = pkg.use.enabled
2054 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2055 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2057 for x in atom.use.required:
2058 if iuse_re.match(x) is None:
2059 missing_iuse.append(x)
2062 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2063 missing_iuse_reasons.append((pkg, mreasons))
2065 need_enable = sorted(atom.use.enabled.difference(use))
2066 need_disable = sorted(atom.use.disabled.intersection(use))
2067 if need_enable or need_disable:
2069 changes.extend(colorize("red", "+" + x) \
2070 for x in need_enable)
2071 changes.extend(colorize("blue", "-" + x) \
2072 for x in need_disable)
2073 mreasons.append("Change USE: %s" % " ".join(changes))
2074 missing_use_reasons.append((pkg, mreasons))
# Prefer to report USE problems only for unmasked packages; masked
# ones get the normal masking message instead.
2076 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2077 in missing_use_reasons if pkg not in masked_pkg_instances]
2079 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2080 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2082 show_missing_use = False
2083 if unmasked_use_reasons:
2084 # Only show the latest version.
2085 show_missing_use = unmasked_use_reasons[:1]
2086 elif unmasked_iuse_reasons:
2087 if missing_use_reasons:
2088 # All packages with required IUSE are masked,
2089 # so display a normal masking message.
2092 show_missing_use = unmasked_iuse_reasons
2094 if show_missing_use:
2095 print("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".")
2096 print("!!! One of the following packages is required to complete your request:")
2097 for pkg, mreasons in show_missing_use:
2098 print("- "+pkg.cpv+" ("+", ".join(mreasons)+")")
2100 elif masked_packages:
2102 colorize("BAD", "All ebuilds that could satisfy ") + \
2103 colorize("INFORM", xinfo) + \
2104 colorize("BAD", " have been masked."))
2105 print("!!! One of the following masked packages is required to complete your request:")
2106 have_eapi_mask = show_masked_packages(masked_packages)
2109 msg = ("The current version of portage supports " + \
2110 "EAPI '%s'. You must upgrade to a newer version" + \
2111 " of portage before EAPI masked packages can" + \
2112 " be installed.") % portage.const.EAPI
2113 from textwrap import wrap
2114 for line in wrap(msg, 75):
2119 print("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".")
2121 # Show parent nodes and the argument that pulled them in.
2122 traversed_nodes = set()
2125 while node is not None:
2126 traversed_nodes.add(node)
2127 msg.append('(dependency required by "%s" [%s])' % \
2128 (colorize('INFORM', str(node.cpv)), node.type_name))
2130 if node not in self._dynamic_config.digraph:
2131 # The parent is not in the graph due to backtracking.
2134 # When traversing to parents, prefer arguments over packages
2135 # since arguments are root nodes. Never traverse the same
2136 # package twice, in order to prevent an infinite loop.
2137 selected_parent = None
2138 for parent in self._dynamic_config.digraph.parent_nodes(node):
2139 if isinstance(parent, DependencyArg):
2140 msg.append('(dependency required by "%s" [argument])' % \
2141 (colorize('INFORM', str(parent))))
2142 selected_parent = None
2144 if parent not in traversed_nodes:
2145 selected_parent = parent
2146 node = selected_parent
2152 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2154 Iterate over Package instances of pkg_type matching the given atom.
2155 This does not check visibility and it also does not match USE for
2156 unbuilt ebuilds since USE are lazily calculated after visibility
2157 checks (to avoid the expense when possible).
# Resolve the dbapi for this package type; porttree dbs expose the
# faster xmatch() interface, others only match().
2160 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
2162 if hasattr(db, "xmatch"):
2163 cpv_list = db.xmatch("match-all", atom)
2165 cpv_list = db.match(atom)
2167 # USE=multislot can make an installed package appear as if
2168 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2169 # won't do any good as long as USE=multislot is enabled since
2170 # the newly built package still won't have the expected slot.
2171 # Therefore, assume that such SLOT dependencies are already
2172 # satisfied rather than forcing a rebuild.
2173 installed = pkg_type == 'installed'
2174 if installed and not cpv_list and atom.slot:
2175 for cpv in db.match(atom.cp):
2176 slot_available = False
# Check whether any db in this root offers the requested slot at all;
# if none does, the slot dep is treated as satisfied (see above).
2177 for other_db, other_type, other_built, \
2178 other_installed, other_keys in \
2179 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2182 other_db.aux_get(cpv, ["SLOT"])[0]:
2183 slot_available = True
2187 if not slot_available:
2189 inst_pkg = self._pkg(cpv, "installed",
2190 root_config, installed=installed)
2191 # Remove the slot from the atom and verify that
2192 # the package matches the resulting atom.
2193 atom_without_slot = portage.dep.remove_slot(atom)
2195 atom_without_slot += str(atom.use)
2196 atom_without_slot = portage.dep.Atom(atom_without_slot)
2197 if portage.match_from_list(
2198 atom_without_slot, [inst_pkg]):
2199 cpv_list = [inst_pkg.cpv]
# Yield Package instances, skipping cpvs that vanished and old-style
# virtual false positives.
2206 for cpv in cpv_list:
2208 pkg = self._pkg(cpv, pkg_type, root_config,
2209 installed=installed, onlydeps=onlydeps)
2210 except portage.exception.PackageNotFound:
2213 if pkg.cp != atom.cp:
2214 # A cpv can be returned from dbapi.match() as an
2215 # old-style virtual match even in cases when the
2216 # package does not actually PROVIDE the virtual.
2217 # Filter out any such false matches here.
2218 if not InternalPackageSet(initial_atoms=(atom,)
2219 ).findAtomForPackage(pkg):
2223 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing wrapper around _select_pkg_highest_available_imp(): results
# are cached per (root, atom, onlydeps) in _highest_pkg_cache.
2224 cache_key = (root, atom, onlydeps)
2225 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
2228 if pkg and not existing:
2229 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2230 if existing and existing == pkg:
2231 # Update the cache to reflect that the
2232 # package has been added to the graph.
2234 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store the result.
2236 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2237 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Track visible packages so that later display/selection logic can
# distinguish them from keyword-masked installed packages.
2240 settings = pkg.root_config.settings
2241 if visible(settings, pkg) and not (pkg.installed and \
2242 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2243 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
2246 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: walk the candidate dbs in type
# preference order and return (pkg, existing_node), where pkg is the
# chosen match for "atom" and existing_node is a graph node already
# occupying the same slot (if any).
2247 root_config = self._frozen_config.roots[root]
2248 pkgsettings = self._frozen_config.pkgsettings[root]
2249 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2250 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2251 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2252 # List of acceptable packages, ordered by type preference.
2253 matched_packages = []
2254 highest_version = None
2255 if not isinstance(atom, portage.dep.Atom):
2256 atom = portage.dep.Atom(atom)
2258 atom_set = InternalPackageSet(initial_atoms=(atom,))
2259 existing_node = None
2261 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2262 empty = "empty" in self._dynamic_config.myparams
2263 selective = "selective" in self._dynamic_config.myparams
2265 noreplace = "--noreplace" in self._frozen_config.myopts
2266 avoid_update = "--update" not in self._frozen_config.myopts
2267 # Behavior of the "selective" parameter depends on
2268 # whether or not a package matches an argument atom.
2269 # If an installed package provides an old-style
2270 # virtual that is no longer provided by an available
2271 # package, the installed package may match an argument
2272 # atom even though none of the available packages do.
2273 # Therefore, "selective" logic does not consider
2274 # whether or not an installed package matches an
2275 # argument atom. It only considers whether or not
2276 # available packages match argument atoms, which is
2277 # represented by the found_available_arg flag.
2278 found_available_arg = False
# First pass looks for an existing graph node in the same slot; the
# second pass performs normal selection.
2279 for find_existing_node in True, False:
2282 for db, pkg_type, built, installed, db_keys in dbs:
2285 if installed and not find_existing_node:
2286 want_reinstall = reinstall or empty or \
2287 (found_available_arg and not selective)
2288 if want_reinstall and matched_packages:
2291 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom,
2293 if pkg in self._dynamic_config._runtime_pkg_mask:
2294 # The package has been masked by the backtracking logic
2297 # Make --noreplace take precedence over --newuse.
2298 if not pkg.installed and noreplace and \
2299 cpv in vardb.match(atom):
2300 # If the installed version is masked, it may
2301 # be necessary to look at lower versions,
2302 # in case there is a visible downgrade.
2304 reinstall_for_flags = None
2306 if not pkg.installed or \
2307 (pkg.built and matched_packages and \
2308 not (avoid_update and pkg.installed)):
2309 # Only enforce visibility on installed packages
2310 # if there is at least one other visible package
2311 # available. By filtering installed masked packages
2312 # here, packages that have been masked since they
2313 # were installed can be automatically downgraded
2314 # to an unmasked version.
2316 if not visible(pkgsettings, pkg):
2318 except portage.exception.InvalidDependString:
2322 # Enable upgrade or downgrade to a version
2323 # with visible KEYWORDS when the installed
2324 # version is masked by KEYWORDS, but never
2325 # reinstall the same exact version only due
2326 # to a KEYWORDS mask.
2327 if built and matched_packages:
2329 different_version = None
2330 for avail_pkg in matched_packages:
2331 if not portage.dep.cpvequal(
2332 pkg.cpv, avail_pkg.cpv):
2333 different_version = avail_pkg
2335 if different_version is not None:
2338 pkgsettings._getMissingKeywords(
2339 pkg.cpv, pkg.metadata):
2342 # If the ebuild no longer exists or it's
2343 # keywords have been dropped, reject built
2344 # instances (installed or binary).
2345 # If --usepkgonly is enabled, assume that
2346 # the ebuild status should be ignored.
2350 pkg.cpv, "ebuild", root_config)
2351 except portage.exception.PackageNotFound:
2354 if not visible(pkgsettings, pkg_eb):
2357 # Calculation of USE for unbuilt ebuilds is relatively
2358 # expensive, so it is only performed lazily, after the
2359 # above visibility checks are complete.
# Track whether an available (non-installed) package matched an
# argument atom; this drives the "selective" reinstall logic above.
2362 if root == self._frozen_config.target_root:
2364 myarg = next(self._iter_atoms_for_pkg(pkg))
2365 except StopIteration:
2367 except portage.exception.InvalidDependString:
2369 # masked by corruption
2371 if not installed and myarg:
2372 found_available_arg = True
2374 if atom.use and not pkg.built:
2375 use = pkg.use.enabled
2376 if atom.use.enabled.difference(use):
2378 if atom.use.disabled.intersection(use):
2380 if pkg.cp == atom_cp:
2381 if highest_version is None:
2382 highest_version = pkg
2383 elif pkg > highest_version:
2384 highest_version = pkg
2385 # At this point, we've found the highest visible
2386 # match from the current repo. Any lower versions
2387 # from this repo are ignored, so this so the loop
2388 # will always end with a break statement below
2390 if find_existing_node:
2391 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2394 # Use PackageSet.findAtomForPackage()
2395 # for PROVIDE support.
2396 if atom_set.findAtomForPackage(e_pkg):
2397 if highest_version and \
2398 e_pkg.cp == atom_cp and \
2399 e_pkg < highest_version and \
2400 e_pkg.slot_atom != highest_version.slot_atom:
2401 # There is a higher version available in a
2402 # different slot, so this existing node is
2406 matched_packages.append(e_pkg)
2407 existing_node = e_pkg
2409 # Compare built package to current config and
2410 # reject the built package if necessary.
2411 if built and not installed and \
2412 ("--newuse" in self._frozen_config.myopts or \
2413 "--reinstall" in self._frozen_config.myopts or \
2414 "--binpkg-respect-use" in self._frozen_config.myopts):
2415 iuses = pkg.iuse.all
2416 old_use = pkg.use.enabled
2418 pkgsettings.setcpv(myeb)
2420 pkgsettings.setcpv(pkg)
2421 now_use = pkgsettings["PORTAGE_USE"].split()
2422 forced_flags = set()
2423 forced_flags.update(pkgsettings.useforce)
2424 forced_flags.update(pkgsettings.usemask)
2426 if myeb and not usepkgonly:
2427 cur_iuse = myeb.iuse.all
2428 if self._reinstall_for_flags(forced_flags,
2432 # Compare current config to installed package
2433 # and do not reinstall if possible.
2434 if not installed and \
2435 ("--newuse" in self._frozen_config.myopts or \
2436 "--reinstall" in self._frozen_config.myopts) and \
2437 cpv in vardb.match(atom):
2438 pkgsettings.setcpv(pkg)
2439 forced_flags = set()
2440 forced_flags.update(pkgsettings.useforce)
2441 forced_flags.update(pkgsettings.usemask)
2442 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
2443 old_iuse = set(filter_iuse_defaults(
2444 vardb.aux_get(cpv, ["IUSE"])[0].split()))
2445 cur_use = pkg.use.enabled
2446 cur_iuse = pkg.iuse.all
2447 reinstall_for_flags = \
2448 self._reinstall_for_flags(
2449 forced_flags, old_use, old_iuse,
2451 if reinstall_for_flags:
2455 matched_packages.append(pkg)
2456 if reinstall_for_flags:
2457 self._dynamic_config._reinstall_nodes[pkg] = \
2461 if not matched_packages:
2464 if "--debug" in self._frozen_config.myopts:
2465 for pkg in matched_packages:
2466 portage.writemsg("%s %s\n" % \
2467 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
2469 # Filter out any old-style virtual matches if they are
2470 # mixed with new-style virtual matches.
2472 if len(matched_packages) > 1 and \
2473 "virtual" == portage.catsplit(cp)[0]:
2474 for pkg in matched_packages:
2477 # Got a new-style virtual, so filter
2478 # out any old-style virtuals.
2479 matched_packages = [pkg for pkg in matched_packages \
2483 if len(matched_packages) > 1:
2485 if existing_node is not None:
2486 return existing_node, existing_node
2487 for pkg in matched_packages:
2489 return pkg, existing_node
# Multiple candidates with equal standing: keep only those equal to
# the best version, then pick by type preference.
2491 bestmatch = portage.best(
2492 [pkg.cpv for pkg in matched_packages])
2493 matched_packages = [pkg for pkg in matched_packages \
2494 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
2496 # ordered by type preference ("ebuild" type is the last resort)
2497 return matched_packages[-1], existing_node
2499 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
2501 Select packages that have already been added to the graph or
2502 those that are installed and have not been scheduled for
# The graph trees' porttree dbapi only contains packages already in
# the graph plus installed ones, so matching against it implements
# the docstring's policy directly.
2505 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
2506 matches = graph_db.match_pkgs(atom)
2509 pkg = matches[-1] # highest match
# Second element tells the caller whether a node in the same slot is
# already part of the graph.
2510 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2511 return pkg, in_graph
2513 def _complete_graph(self):
2515 Add any deep dependencies of required sets (args, system, world) that
2516 have not been pulled into the graph yet. This ensures that the graph
2517 is consistent such that initially satisfied deep dependencies are not
2518 broken in the new graph. Initially unsatisfied dependencies are
2519 irrelevant since we only want to avoid breaking dependencies that are
2522 Since this method can consume enough time to disturb users, it is
2523 currently only enabled by the --complete-graph option.
2525 if "--buildpkgonly" in self._frozen_config.myopts or \
2526 "recurse" not in self._dynamic_config.myparams:
2529 if "complete" not in self._dynamic_config.myparams:
2530 # Skip this to avoid consuming enough time to disturb users.
2533 # Put the depgraph into a mode that causes it to only
2534 # select packages that have already been added to the
2535 # graph or those that are installed and have not been
2536 # scheduled for replacement. Also, toggle the "deep"
2537 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selectors defined above.
2539 self._select_atoms = self._select_atoms_from_graph
2540 self._select_package = self._select_pkg_from_graph
2541 already_deep = self._dynamic_config.myparams.get("deep") is True
2542 if not already_deep:
2543 self._dynamic_config.myparams["deep"] = True
2545 for root in self._frozen_config.roots:
2546 required_set_names = self._frozen_config._required_set_names.copy()
2547 if root == self._frozen_config.target_root and \
2548 (already_deep or "empty" in self._dynamic_config.myparams):
2549 required_set_names.difference_update(self._dynamic_config._sets)
2550 if not required_set_names and not self._dynamic_config._ignored_deps:
2552 root_config = self._frozen_config.roots[root]
2553 setconfig = root_config.setconfig
2555 # Reuse existing SetArg instances when available.
2556 for arg in self._dynamic_config.digraph.root_nodes():
2557 if not isinstance(arg, SetArg):
2559 if arg.root_config != root_config:
2561 if arg.name in required_set_names:
2563 required_set_names.remove(arg.name)
2564 # Create new SetArg instances only when necessary.
2565 for s in required_set_names:
2566 expanded_set = InternalPackageSet(
2567 initial_atoms=setconfig.getSetAtoms(s))
2568 atom = SETPREFIX + s
2569 args.append(SetArg(arg=atom, set=expanded_set,
2570 root_config=root_config))
2571 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of every required set for (re)traversal.
2573 for atom in arg.set:
2574 self._dynamic_config._dep_stack.append(
2575 Dependency(atom=atom, root=root, parent=arg))
2576 if self._dynamic_config._ignored_deps:
2577 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
2578 self._dynamic_config._ignored_deps = []
2579 if not self._create_graph(allow_unsatisfied=True):
2581 # Check the unsatisfied deps to see if any initially satisfied deps
2582 # will become unsatisfied due to an upgrade. Initially unsatisfied
2583 # deps are irrelevant since we only want to avoid breaking deps
2584 # that are initially satisfied.
2585 while self._dynamic_config._unsatisfied_deps:
2586 dep = self._dynamic_config._unsatisfied_deps.pop()
2587 matches = vardb.match_pkgs(dep.atom)
2589 self._dynamic_config._initially_unsatisfied_deps.append(dep)
2591 # An scheduled installation broke a deep dependency.
2592 # Add the installed package to the graph so that it
2593 # will be appropriately reported as a slot collision
2594 # (possibly solvable via backtracking).
2595 pkg = matches[-1] # highest match
2596 if not self._add_pkg(pkg, dep):
2598 if not self._create_graph(allow_unsatisfied=True):
2602 def _pkg(self, cpv, type_name, root_config, installed=False,
2605 Get a package instance from the cache, or create a new
2606 one if necessary. Raises PackageNotFound from aux_get if it
2607 failures for some reason (package does not exist or is
# Installed and onlydeps packages are keyed as "nomerge" operations in
# the frozen-config package cache.
2611 if installed or onlydeps:
2612 operation = "nomerge"
2613 pkg = self._frozen_config._pkg_cache.get(
2614 (type_name, root_config.root, cpv, operation))
2615 if pkg is None and onlydeps and not installed:
2616 # Maybe it already got pulled in as a "merge" node.
2617 pkg = self._dynamic_config.mydbapi[root_config.root].get(
2618 (type_name, root_config.root, cpv, 'merge'))
# Cache miss: build a new Package from dbapi metadata.
2621 tree_type = self.pkg_tree_map[type_name]
2622 db = root_config.trees[tree_type].dbapi
2623 db_keys = list(self._frozen_config._trees_orig[root_config.root][
2624 tree_type].dbapi._aux_cache_keys)
2626 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
2628 raise portage.exception.PackageNotFound(cpv)
2629 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
2630 installed=installed, metadata=metadata, onlydeps=onlydeps,
2631 root_config=root_config, type_name=type_name)
2632 self._frozen_config._pkg_cache[pkg] = pkg
2635 def _validate_blockers(self):
2636 """Remove any blockers from the digraph that do not match any of the
2637 packages within the graph. If necessary, create hard deps to ensure
2638 correct merge order such that mutually blocking packages are never
2639 installed simultaneously."""
2641 if "--buildpkgonly" in self._frozen_config.myopts or \
2642 "--nodeps" in self._frozen_config.myopts:
2645 #if "deep" in self._dynamic_config.myparams:
2647 # Pull in blockers from all installed packages that haven't already
2648 # been pulled into the depgraph. This is not enabled by default
2649 # due to the performance penalty that is incurred by all the
2650 # additional dep_check calls that are required.
2652 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
# Phase 1: for each root, gather blocker atoms from installed
# packages, using the persistent BlockerCache to avoid redundant
# dep_check calls.
2653 for myroot in self._frozen_config.trees:
2654 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
2655 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
2656 pkgsettings = self._frozen_config.pkgsettings[myroot]
2657 final_db = self._dynamic_config.mydbapi[myroot]
2659 blocker_cache = BlockerCache(myroot, vardb)
# Entries left in stale_cache after the loop refer to packages no
# longer installed; they are purged below.
2660 stale_cache = set(blocker_cache)
2663 stale_cache.discard(cpv)
2664 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
2666 # Check for masked installed packages. Only warn about
2667 # packages that are in the graph in order to avoid warning
2668 # about those that will be automatically uninstalled during
2669 # the merge process or by --depclean.
2671 if pkg_in_graph and not visible(pkgsettings, pkg):
2672 self._dynamic_config._masked_installed.add(pkg)
2674 blocker_atoms = None
2680 self._dynamic_config._blocker_parents.child_nodes(pkg))
2685 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
2688 if blockers is not None:
2689 blockers = set(blocker.atom for blocker in blockers)
2691 # If this node has any blockers, create a "nomerge"
2692 # node for it so that they can be enforced.
2693 self._spinner_update()
2694 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written, so the entry is invalid.
2695 if blocker_data is not None and \
2696 blocker_data.counter != long(pkg.metadata["COUNTER"]):
2699 # If blocker data from the graph is available, use
2700 # it to validate the cache and update the cache if
2702 if blocker_data is not None and \
2703 blockers is not None:
2704 if not blockers.symmetric_difference(
2705 blocker_data.atoms):
2709 if blocker_data is None and \
2710 blockers is not None:
2711 # Re-use the blockers from the graph.
2712 blocker_atoms = sorted(blockers)
2713 counter = long(pkg.metadata["COUNTER"])
2715 blocker_cache.BlockerData(counter, blocker_atoms)
2716 blocker_cache[pkg.cpv] = blocker_data
2720 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
2722 # Use aux_get() to trigger FakeVartree global
2723 # updates on *DEPEND when appropriate.
2724 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2725 # It is crucial to pass in final_db here in order to
2726 # optimize dep_check calls by eliminating atoms via
2727 # dep_wordreduce and dep_eval calls.
2729 portage.dep._dep_check_strict = False
2731 success, atoms = portage.dep_check(depstr,
2732 final_db, pkgsettings, myuse=pkg.use.enabled,
2733 trees=self._dynamic_config._graph_trees, myroot=myroot)
2734 except Exception as e:
2735 if isinstance(e, SystemExit):
2737 # This is helpful, for example, if a ValueError
2738 # is thrown from cpv_expand due to multiple
2739 # matches (this can happen if an atom lacks a
2741 show_invalid_depstring_notice(
2742 pkg, depstr, str(e))
2746 portage.dep._dep_check_strict = True
2748 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2749 if replacement_pkg and \
2750 replacement_pkg[0].operation == "merge":
2751 # This package is being replaced anyway, so
2752 # ignore invalid dependencies so as not to
2753 # annoy the user too much (otherwise they'd be
2754 # forced to manually unmerge it first).
2756 show_invalid_depstring_notice(pkg, depstr, atoms)
2758 blocker_atoms = [myatom for myatom in atoms \
2760 blocker_atoms.sort()
2761 counter = long(pkg.metadata["COUNTER"])
2762 blocker_cache[cpv] = \
2763 blocker_cache.BlockerData(counter, blocker_atoms)
2766 for atom in blocker_atoms:
2767 blocker = Blocker(atom=atom,
2768 eapi=pkg.metadata["EAPI"], root=myroot)
2769 self._dynamic_config._blocker_parents.add(blocker, pkg)
2770 except portage.exception.InvalidAtom as e:
2771 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2772 show_invalid_depstring_notice(
2773 pkg, depstr, "Invalid Atom: %s" % (e,))
2775 for cpv in stale_cache:
2776 del blocker_cache[cpv]
2777 blocker_cache.flush()
2780 # Discard any "uninstall" tasks scheduled by previous calls
2781 # to this method, since those tasks may not make sense given
2782 # the current graph state.
2783 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
2784 if previous_uninstall_tasks:
2785 self._dynamic_config._blocker_uninstalls = digraph()
2786 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: evaluate every collected blocker against the initial
# (installed) and final (planned) package sets and resolve or record
# each conflict.
2788 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
2789 self._spinner_update()
2790 root_config = self._frozen_config.roots[blocker.root]
2791 virtuals = root_config.settings.getvirtuals()
2792 myroot = blocker.root
2793 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
2794 final_db = self._dynamic_config.mydbapi[myroot]
2796 provider_virtual = False
2797 if blocker.cp in virtuals and \
2798 not self._have_new_virt(blocker.root, blocker.cp):
2799 provider_virtual = True
2801 # Use this to check PROVIDE for each matched package
2803 atom_set = InternalPackageSet(
2804 initial_atoms=[blocker.atom])
# Old-style virtual blockers are expanded to one atom per provider.
2806 if provider_virtual:
2808 for provider_entry in virtuals[blocker.cp]:
2810 portage.dep_getkey(provider_entry)
2811 atoms.append(Atom(blocker.atom.replace(
2812 blocker.cp, provider_cp)))
2814 atoms = [blocker.atom]
2816 blocked_initial = set()
2818 for pkg in initial_db.match_pkgs(atom):
2819 if atom_set.findAtomForPackage(pkg):
2820 blocked_initial.add(pkg)
2822 blocked_final = set()
2824 for pkg in final_db.match_pkgs(atom):
2825 if atom_set.findAtomForPackage(pkg):
2826 blocked_final.add(pkg)
2828 if not blocked_initial and not blocked_final:
2829 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
2830 self._dynamic_config._blocker_parents.remove(blocker)
2831 # Discard any parents that don't have any more blockers.
2832 for pkg in parent_pkgs:
2833 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
2834 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
2835 self._dynamic_config._blocker_parents.remove(pkg)
2837 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
2838 unresolved_blocks = False
2839 depends_on_order = set()
2840 for pkg in blocked_initial:
2841 if pkg.slot_atom == parent.slot_atom and \
2842 not blocker.atom.blocker.overlap.forbid:
2843 # New !!atom blockers do not allow temporary
2844 # simulaneous installation, so unlike !atom
2845 # blockers, !!atom blockers aren't ignored
2846 # when they match other packages occupying
2849 if parent.installed:
2850 # Two currently installed packages conflict with
2851 # eachother. Ignore this case since the damage
2852 # is already done and this would be likely to
2853 # confuse users if displayed like a normal blocker.
2856 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
2858 if parent.operation == "merge":
2859 # Maybe the blocked package can be replaced or simply
2860 # unmerged to resolve this block.
2861 depends_on_order.add((pkg, parent))
2863 # None of the above blocker resolutions techniques apply,
2864 # so apparently this one is unresolvable.
2865 unresolved_blocks = True
2866 for pkg in blocked_final:
2867 if pkg.slot_atom == parent.slot_atom and \
2868 not blocker.atom.blocker.overlap.forbid:
2869 # New !!atom blockers do not allow temporary
2870 # simulaneous installation, so unlike !atom
2871 # blockers, !!atom blockers aren't ignored
2872 # when they match other packages occupying
2875 if parent.operation == "nomerge" and \
2876 pkg.operation == "nomerge":
2877 # This blocker will be handled the next time that a
2878 # merge of either package is triggered.
2881 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
2883 # Maybe the blocking package can be
2884 # unmerged to resolve this block.
2885 if parent.operation == "merge" and pkg.installed:
2886 depends_on_order.add((pkg, parent))
2888 elif parent.operation == "nomerge":
2889 depends_on_order.add((parent, pkg))
2891 # None of the above blocker resolutions techniques apply,
2892 # so apparently this one is unresolvable.
2893 unresolved_blocks = True
2895 # Make sure we don't unmerge any package that have been pulled
2897 if not unresolved_blocks and depends_on_order:
2898 for inst_pkg, inst_task in depends_on_order:
2899 if self._dynamic_config.digraph.contains(inst_pkg) and \
2900 self._dynamic_config.digraph.parent_nodes(inst_pkg):
2901 unresolved_blocks = True
2904 if not unresolved_blocks and depends_on_order:
2905 for inst_pkg, inst_task in depends_on_order:
# Create an explicit "uninstall" task so the scheduler removes the
# blocked package before/with the merge that requires it gone.
2906 uninst_task = Package(built=inst_pkg.built,
2907 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
2908 metadata=inst_pkg.metadata,
2909 operation="uninstall",
2910 root_config=inst_pkg.root_config,
2911 type_name=inst_pkg.type_name)
2912 # Enforce correct merge order with a hard dep.
2913 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
2914 priority=BlockerDepPriority.instance)
2915 # Count references to this blocker so that it can be
2916 # invalidated after nodes referencing it have been
2918 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
2919 if not unresolved_blocks and not depends_on_order:
2920 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
2921 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
2922 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
2923 self._dynamic_config._blocker_parents.remove(blocker)
2924 if not self._dynamic_config._blocker_parents.child_nodes(parent):
2925 self._dynamic_config._blocker_parents.remove(parent)
2926 if unresolved_blocks:
2927 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
2931 def _accept_blocker_conflicts(self):
# Blocker conflicts are tolerable when no packages will actually be
# merged/unmerged, i.e. under any of the options checked below.
2933 for x in ("--buildpkgonly", "--fetchonly",
2934 "--fetch-all-uri", "--nodeps"):
2935 if x in self._frozen_config.myopts:
2940 def _merge_order_bias(self, mygraph):
2942 For optimal leaf node selection, promote deep system runtime deps and
2943 order nodes from highest to lowest overall reference count.
# Precompute each node's parent count; used by the comparator below.
2947 for node in mygraph.order:
2948 node_info[node] = len(mygraph.parent_nodes(node))
2949 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
2951 def cmp_merge_preference(node1, node2):
# Uninstall operations sort after everything else, then deep system
# deps are promoted, then higher reference counts come first.
2953 if node1.operation == 'uninstall':
2954 if node2.operation == 'uninstall':
2958 if node2.operation == 'uninstall':
2959 if node1.operation == 'uninstall':
2963 node1_sys = node1 in deep_system_deps
2964 node2_sys = node2 in deep_system_deps
2965 if node1_sys != node2_sys:
2970 return node_info[node2] - node_info[node1]
# cmp-style comparator adapted for key-based sorting.
2972 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
	"""
	Build (and cache) the serialized merge task list, then copy it
	into retlist.  Serialization is retried until it succeeds, since
	_serialize_tasks() may raise _serialize_tasks_retry after
	enabling "complete" mode.

	@param reversed: deliberately shadows the builtin name; kept for
		interface compatibility with existing callers.

	NOTE(review): the `try:` wrapper, the retry-handler body, and the
	reversed/return handling are elided in this excerpt.
	"""
	while self._dynamic_config._serialized_tasks_cache is None:
		self._resolve_conflicts()
			self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
				self._serialize_tasks()
		except self._serialize_tasks_retry:
	# Copy so callers can't mutate the cache.
	retlist = self._dynamic_config._serialized_tasks_cache[:]
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in it's parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.
	"""
	if self._dynamic_config._scheduler_graph is None:
	# NOTE(review): the body of the `if` above (presumably the call
	# that populates _scheduler_graph, e.g. self.altlist()) is elided
	# in this excerpt — confirm against the full source.
	self.break_refs(self._dynamic_config._scheduler_graph.order)
	return self._dynamic_config._scheduler_graph
def break_refs(self, nodes):
	"""
	Take a mergelist like that returned from self.altlist() and
	break any references that lead back to the depgraph. This is
	useful if you want to hold references to packages without
	also holding the depgraph on the heap.
	"""
	# NOTE(review): the loop header iterating `nodes` is elided in
	# this excerpt; the body below executes once per node.
		if hasattr(node, "root_config"):
			# The FakeVartree references the _package_cache which
			# references the depgraph. So that Package instances don't
			# hold the depgraph and FakeVartree on the heap, replace
			# the RootConfig that references the FakeVartree with the
			# original RootConfig instance which references the actual
			# vartree.
			node.root_config = \
				self._frozen_config._trees_orig[node.root_config.root]["root_config"]
3026 def _resolve_conflicts(self):
3027 if not self._complete_graph():
3028 raise self._unknown_internal_error()
3030 if not self._validate_blockers():
3031 raise self._unknown_internal_error()
3033 if self._dynamic_config._slot_collision_info:
3034 self._process_slot_conflicts()
def _serialize_tasks(self):
	"""
	Flatten the dependency digraph into an ordered merge list.

	Returns a (retlist, scheduler_graph) tuple: retlist is the ordered
	list of Package tasks (with Blocker entries appended for satisfied
	and unsolvable blockers) and scheduler_graph is a copy of the
	digraph with uninstall edges adjusted for the Scheduler.  Raises
	_serialize_tasks_retry when the graph must be recomputed with
	"complete" mode enabled, and _unknown_internal_error for circular
	dependencies or unacceptable blocker conflicts.

	NOTE(review): this excerpt elides many source lines (try/else/
	break/continue branches and some assignments, e.g. the asap_nodes
	and retlist initializations).  The indentation below reflects the
	surrounding upstream structure; confirm against the full source.
	"""
	if "--debug" in self._frozen_config.myopts:
		writemsg("\ndigraph:\n\n", noiselevel=-1)
		self._dynamic_config.digraph.debug_print()
		writemsg("\n", noiselevel=-1)

	scheduler_graph = self._dynamic_config.digraph.copy()

	if '--nodeps' in self._frozen_config.myopts:
		# Preserve the package order given on the command line.
		return ([node for node in scheduler_graph \
			if isinstance(node, Package) \
			and node.operation == 'merge'], scheduler_graph)

	mygraph=self._dynamic_config.digraph.copy()
	# Prune "nomerge" root nodes if nothing depends on them, since
	# otherwise they slow down merge order calculation. Don't remove
	# non-root nodes since they help optimize merge order in some cases
	# such as revdep-rebuild.
	removed_nodes = set()
	# (pruning loop header elided)
		for node in mygraph.root_nodes():
			if not isinstance(node, Package) or \
				node.installed or node.onlydeps:
				removed_nodes.add(node)
			self._spinner_update()
			mygraph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	self._merge_order_bias(mygraph)
	def cmp_circular_bias(n1, n2):
		"""
		RDEPEND is stronger than PDEPEND and this function
		measures such a strength bias within a circular
		dependency relationship.
		"""
		n1_n2_medium = n2 in mygraph.child_nodes(n1,
			ignore_priority=priority_range.ignore_medium_soft)
		n2_n1_medium = n1 in mygraph.child_nodes(n2,
			ignore_priority=priority_range.ignore_medium_soft)
		if n1_n2_medium == n2_n1_medium:
	myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
	# Contains uninstall tasks that have been scheduled to
	# occur after overlapping blockers have been installed.
	scheduled_uninstalls = set()
	# Contains any Uninstall tasks that have been ignored
	# in order to avoid the circular deps code path. These
	# correspond to blocker conflicts that could not be
	# resolved.
	ignored_uninstall_tasks = set()
	have_uninstall_task = False
	complete = "complete" in self._dynamic_config.myparams

	def get_nodes(**kwargs):
		"""
		Returns leaf nodes excluding Uninstall instances
		since those should be executed as late as possible.
		"""
		return [node for node in mygraph.leaf_nodes(**kwargs) \
			if isinstance(node, Package) and \
				(node.operation != "uninstall" or \
				node in scheduled_uninstalls)]

	# sys-apps/portage needs special treatment if ROOT="/"
	running_root = self._frozen_config._running_root.root
	from portage.const import PORTAGE_PACKAGE_ATOM
	runtime_deps = InternalPackageSet(
		initial_atoms=[PORTAGE_PACKAGE_ATOM])
	running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
		PORTAGE_PACKAGE_ATOM)
	replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
		PORTAGE_PACKAGE_ATOM)

	# (if/else headers reducing the match lists to single packages
	# are elided around the next two assignments)
		running_portage = running_portage[0]
		running_portage = None

	if replacement_portage:
		replacement_portage = replacement_portage[0]
		replacement_portage = None

	if replacement_portage == running_portage:
		replacement_portage = None

	if replacement_portage is not None:
		# update from running_portage to replacement_portage asap
		asap_nodes.append(replacement_portage)

	if running_portage is not None:
			portage_rdepend = self._select_atoms_highest_available(
				running_root, running_portage.metadata["RDEPEND"],
				myuse=running_portage.use.enabled,
				parent=running_portage, strict=False)
		except portage.exception.InvalidDependString as e:
			portage.writemsg("!!! Invalid RDEPEND in " + \
				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
				(running_root, running_portage.cpv, e), noiselevel=-1)
			portage_rdepend = {running_portage : []}
		for atoms in portage_rdepend.values():
			runtime_deps.update(atom for atom in atoms \
				if not atom.blocker)

	def gather_deps(ignore_priority, mergeable_nodes,
		selected_nodes, node):
		"""
		Recursively gather a group of nodes that RDEPEND on
		eachother. This ensures that they are merged as a group
		and get their RDEPENDs satisfied as soon as possible.
		"""
		if node in selected_nodes:
		if node not in mergeable_nodes:
		if node == replacement_portage and \
			mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
			# Make sure that portage always has all of it's
			# RDEPENDs installed first.
		selected_nodes.add(node)
		for child in mygraph.child_nodes(node,
			ignore_priority=ignore_priority):
			if not gather_deps(ignore_priority,
				mergeable_nodes, selected_nodes, child):

	def ignore_uninst_or_med(priority):
		# Blocker-driven uninstall edges are never ignorable here.
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium(priority)

	def ignore_uninst_or_med_soft(priority):
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium_soft(priority)

	tree_mode = "--tree" in self._frozen_config.myopts
	# Tracks whether or not the current iteration should prefer asap_nodes
	# if available. This is set to False when the previous iteration
	# failed to select any nodes. It is reset whenever nodes are
	# successfully selected.

	# Controls whether or not the current iteration should drop edges that
	# are "satisfied" by installed packages, in order to solve circular
	# dependencies. The deep runtime dependencies of installed packages are
	# not checked in this case (bug #199856), so it must be avoided
	# whenever possible.
	drop_satisfied = False

	# State of variables for successive iterations that loosen the
	# criteria for node selection.
	#
	# iteration   prefer_asap   drop_satisfied
	# If no nodes are selected on the last iteration, it is due to
	# unresolved blockers or circular dependencies.

	while not mygraph.empty():
		self._spinner_update()
		selected_nodes = None
		ignore_priority = None
		if drop_satisfied or (prefer_asap and asap_nodes):
			priority_range = DepPrioritySatisfiedRange
			priority_range = DepPriorityNormalRange
		if prefer_asap and asap_nodes:
			# ASAP nodes are merged before their soft deps. Go ahead and
			# select root nodes here if necessary, since it's typical for
			# the parent to have been removed from the graph already.
			asap_nodes = [node for node in asap_nodes \
				if mygraph.contains(node)]
			for node in asap_nodes:
				if not mygraph.child_nodes(node,
					ignore_priority=priority_range.ignore_soft):
					selected_nodes = [node]
					asap_nodes.remove(node)

		if not selected_nodes and \
			not (prefer_asap and asap_nodes):
			for i in range(priority_range.NONE,
				priority_range.MEDIUM_SOFT + 1):
				ignore_priority = priority_range.ignore_priority[i]
				nodes = get_nodes(ignore_priority=ignore_priority)
					# If there is a mixture of merges and uninstalls,
					# do the uninstalls first.
						good_uninstalls = []
							if node.operation == "uninstall":
								good_uninstalls.append(node)
							nodes = good_uninstalls
					if ignore_priority is None and not tree_mode:
						# Greedily pop all of these nodes since no
						# relationship has been ignored. This optimization
						# destroys --tree output, so it's disabled in tree
						# mode.
						selected_nodes = nodes
						# For optimal merge order:
						#  * Only pop one node.
						#  * Removing a root node (node without a parent)
						#    will not produce a leaf node, so avoid it.
						#  * It's normal for a selected uninstall to be a
						#    root node, so don't check them for parents.
							if node.operation == "uninstall" or \
								mygraph.parent_nodes(node):
								selected_nodes = [node]

		if not selected_nodes:
			nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
				mergeable_nodes = set(nodes)
				if prefer_asap and asap_nodes:
				for i in range(priority_range.SOFT,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
						if not mygraph.parent_nodes(node):
						selected_nodes = set()
						if gather_deps(ignore_priority,
							mergeable_nodes, selected_nodes, node):
							selected_nodes = None

				if prefer_asap and asap_nodes and not selected_nodes:
					# We failed to find any asap nodes to merge, so ignore
					# them for the next iteration.

		if selected_nodes and ignore_priority is not None:
			# Try to merge ignored medium_soft deps as soon as possible
			# if they're not satisfied by installed packages.
			for node in selected_nodes:
				children = set(mygraph.child_nodes(node))
				soft = children.difference(
					mygraph.child_nodes(node,
					ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
				medium_soft = children.difference(
					mygraph.child_nodes(node,
						DepPrioritySatisfiedRange.ignore_medium_soft))
				medium_soft.difference_update(soft)
				for child in medium_soft:
					if child in selected_nodes:
					if child in asap_nodes:
					asap_nodes.append(child)

		if selected_nodes and len(selected_nodes) > 1:
			if not isinstance(selected_nodes, list):
				selected_nodes = list(selected_nodes)
			selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

		if not selected_nodes and not myblocker_uninstalls.is_empty():
			# An Uninstall task needs to be executed in order to
			# avoid conflict if possible.
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange

			mergeable_nodes = get_nodes(
				ignore_priority=ignore_uninst_or_med)

			min_parent_deps = None
			for task in myblocker_uninstalls.leaf_nodes():
				# Do some sanity checks so that system or world packages
				# don't get uninstalled inappropriately here (only really
				# necessary when --complete-graph has not been enabled).
				if task in ignored_uninstall_tasks:
				if task in scheduled_uninstalls:
					# It's been scheduled but it hasn't
					# been executed yet due to dependence
					# on installation of blocking packages.
				root_config = self._frozen_config.roots[task.root]
				inst_pkg = self._pkg(task.cpv, "installed", root_config,

				if self._dynamic_config.digraph.contains(inst_pkg):

				forbid_overlap = False
				heuristic_overlap = False
				for blocker in myblocker_uninstalls.parent_nodes(task):
					if blocker.eapi in ("0", "1"):
						heuristic_overlap = True
					elif blocker.atom.blocker.overlap.forbid:
						forbid_overlap = True
				if forbid_overlap and running_root == task.root:

				if heuristic_overlap and running_root == task.root:
					# Never uninstall sys-apps/portage or it's essential
					# dependencies, except through replacement.
						runtime_dep_atoms = \
							list(runtime_deps.iterAtomsForPackage(task))
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

					# Don't uninstall a runtime dep if it appears
					# to be the only suitable one installed.
					vardb = root_config.trees["vartree"].dbapi
					for atom in runtime_dep_atoms:
						other_version = None
						for pkg in vardb.match_pkgs(atom):
							if pkg.cpv == task.cpv and \
								pkg.metadata["COUNTER"] == \
								task.metadata["COUNTER"]:
						if other_version is None:

					# For packages in the system set, don't take
					# any chances. If the conflict can't be resolved
					# by a normal replacement operation then abort.
						for atom in root_config.sets[
							"system"].iterAtomsForPackage(task):
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Note that the world check isn't always
				# necessary since self._complete_graph() will
				# add all packages from the system and world sets to the
				# graph. This just allows unresolved conflicts to be
				# detected as early as possible, which makes it possible
				# to avoid calling self._complete_graph() when it is
				# unnecessary due to blockers triggering an abortion.
					# For packages in the world set, go ahead an uninstall
					# when necessary, as long as the atom will be satisfied
					# in the final state.
					graph_db = self._dynamic_config.mydbapi[task.root]
						for atom in root_config.sets[
							"world"].iterAtomsForPackage(task):
							for pkg in graph_db.match_pkgs(atom):
									self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Check the deps of parent nodes to ensure that
				# the chosen task produces a leaf node. Maybe
				# this can be optimized some more to make the
				# best possible choice, but the current algorithm
				# is simple and should be near optimal for most
				# common cases.
				mergeable_parent = False
				for parent in mygraph.parent_nodes(task):
					parent_deps.update(mygraph.child_nodes(parent,
						ignore_priority=priority_range.ignore_medium_soft))
					if parent in mergeable_nodes and \
						gather_deps(ignore_uninst_or_med_soft,
						mergeable_nodes, set(), parent):
						mergeable_parent = True

				if not mergeable_parent:

				parent_deps.remove(task)
				if min_parent_deps is None or \
					len(parent_deps) < min_parent_deps:
					min_parent_deps = len(parent_deps)

			if uninst_task is not None:
				# The uninstall is performed only after blocking
				# packages have been merged on top of it. File
				# collisions between blocking packages are detected
				# and removed from the list of files to be uninstalled.
				scheduled_uninstalls.add(uninst_task)
				parent_nodes = mygraph.parent_nodes(uninst_task)

				# Reverse the parent -> uninstall edges since we want
				# to do the uninstall after blocking packages have
				# been merged on top of it.
				mygraph.remove(uninst_task)
				for blocked_pkg in parent_nodes:
					mygraph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)
					scheduler_graph.remove_edge(uninst_task, blocked_pkg)
					scheduler_graph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)

				# Sometimes a merge node will render an uninstall
				# node unnecessary (due to occupying the same SLOT),
				# and we want to avoid executing a separate uninstall
				# task in that case.
				slot_node = self._dynamic_config.mydbapi[uninst_task.root
					].match_pkgs(uninst_task.slot_atom)
					slot_node[0].operation == "merge":
					mygraph.add(slot_node[0], uninst_task,
						priority=BlockerDepPriority.instance)

				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			# Only select root nodes as a last resort. This case should
			# only trigger when the graph is nearly empty and the only
			# remaining nodes are isolated (no parents or children). Since
			# the nodes must be isolated, ignore_priority is not needed.
			selected_nodes = get_nodes()

		if not selected_nodes and not drop_satisfied:
			drop_satisfied = True

		if not selected_nodes and not myblocker_uninstalls.is_empty():
			# If possible, drop an uninstall task here in order to avoid
			# the circular deps code path. The corresponding blocker will
			# still be counted as an unresolved conflict.
			for node in myblocker_uninstalls.leaf_nodes():
					mygraph.remove(node)
					ignored_uninstall_tasks.add(node)

			if uninst_task is not None:
				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			self._dynamic_config._circular_deps_for_display = mygraph
			raise self._unknown_internal_error()

		# At this point, we've succeeded in selecting one or more nodes, so
		# reset state variables for leaf node selection.
		drop_satisfied = False

		mygraph.difference_update(selected_nodes)

		for node in selected_nodes:
			if isinstance(node, Package) and \
				node.operation == "nomerge":

			# Handle interactions between blockers
			# and uninstallation tasks.
			solved_blockers = set()
			if isinstance(node, Package) and \
				"uninstall" == node.operation:
				have_uninstall_task = True
				vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
				previous_cpv = vardb.match(node.slot_atom)
					# The package will be replaced by this one, so remove
					# the corresponding Uninstall task if necessary.
					previous_cpv = previous_cpv[0]
						("installed", node.root, previous_cpv, "uninstall")
						mygraph.remove(uninst_task)

			if uninst_task is not None and \
				uninst_task not in ignored_uninstall_tasks and \
				myblocker_uninstalls.contains(uninst_task):
				blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
				myblocker_uninstalls.remove(uninst_task)
				# Discard any blockers that this Uninstall solves.
				for blocker in blocker_nodes:
					if not myblocker_uninstalls.child_nodes(blocker):
						myblocker_uninstalls.remove(blocker)
						solved_blockers.add(blocker)

			retlist.append(node)

			if (isinstance(node, Package) and \
				"uninstall" == node.operation) or \
				(uninst_task is not None and \
				uninst_task in scheduled_uninstalls):
				# Include satisfied blockers in the merge list
				# since the user might be interested and also
				# it serves as an indicator that blocking packages
				# will be temporarily installed simultaneously.
				for blocker in solved_blockers:
					retlist.append(Blocker(atom=blocker.atom,
						root=blocker.root, eapi=blocker.eapi,

	unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
	for node in myblocker_uninstalls.root_nodes():
		unsolvable_blockers.add(node)

	for blocker in unsolvable_blockers:
		retlist.append(blocker)

	# If any Uninstall tasks need to be executed in order
	# to avoid a conflict, complete the graph with any
	# dependencies that may have been initially
	# neglected (to ensure that unsafe Uninstall tasks
	# are properly identified and blocked from execution).
	if have_uninstall_task and \
		not unsolvable_blockers:
		self._dynamic_config.myparams["complete"] = True
		raise self._serialize_tasks_retry("")

	if unsolvable_blockers and \
		not self._accept_blocker_conflicts():
		self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
		self._dynamic_config._serialized_tasks_cache = retlist[:]
		self._dynamic_config._scheduler_graph = scheduler_graph
		raise self._unknown_internal_error()

	if self._dynamic_config._slot_collision_info and \
		not self._accept_blocker_conflicts():
		self._dynamic_config._serialized_tasks_cache = retlist[:]
		self._dynamic_config._scheduler_graph = scheduler_graph
		raise self._unknown_internal_error()

	return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	"""
	Emit diagnostics for a circular-dependency failure: repeatedly
	strip acyclic root nodes from `mygraph`, display the remaining
	cycle members with --tree output forced on, then print the raw
	graph and advice about USE-triggered optional dependencies.

	NOTE(review): several lines (loop headers, if/else branches and
	the display_order initialization) are elided in this excerpt.
	"""
	# No leaf nodes are available, so we have a circular
	# dependency panic situation. Reduce the noise level to a
	# minimum via repeated elimination of root nodes since they
	# have no parents and thus can not be part of a cycle.
		root_nodes = mygraph.root_nodes(
			ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
		mygraph.difference_update(root_nodes)
	# Display the USE flags that are enabled on nodes that are part
	# of dependency cycles in case that helps the user decide to
	# disable some of them.
	tempgraph = mygraph.copy()
	while not tempgraph.empty():
		nodes = tempgraph.leaf_nodes()
			# Fall back to an arbitrary node when the cycle leaves
			# no leaf to pick.
			node = tempgraph.order[0]
		display_order.append(node)
		tempgraph.remove(node)
	display_order.reverse()
	# Force a verbose tree display for the cycle members.
	self._frozen_config.myopts.pop("--quiet", None)
	self._frozen_config.myopts.pop("--verbose", None)
	self._frozen_config.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(display_order)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
	portage.writemsg("\n", noiselevel=-1)
	mygraph.debug_print()
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Note that circular dependencies " + \
		"can often be avoided by temporarily\n", noiselevel=-1)
	portage.writemsg(prefix + "disabling USE flags that trigger " + \
		"optional dependencies.\n", noiselevel=-1)
3689 def _show_merge_list(self):
3690 if self._dynamic_config._serialized_tasks_cache is not None and \
3691 not (self._dynamic_config._displayed_list and \
3692 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
3693 self._dynamic_config._displayed_list == \
3694 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
3695 display_list = self._dynamic_config._serialized_tasks_cache[:]
3696 if "--tree" in self._frozen_config.myopts:
3697 display_list.reverse()
3698 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""
	Report blocker conflicts that could not be resolved: show the
	merge list, print a wrapped error banner, and list each
	conflicting package together with up to max_parents of the
	parent atoms that pulled it in.

	NOTE(review): several lines (break statements, if/else headers
	and some initializations such as conflict_pkgs/pruned_pkgs/
	pruned_list/max_parents/indent) are elided in this excerpt.
	"""
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	from textwrap import wrap
	portage.writemsg("\n", noiselevel=-1)
	for line in wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Display the conflicting packages along with the packages
	# that pulled them in. This is helpful for troubleshooting
	# cases in which blockers don't solve automatically and
	# the reasons are not apparent from the normal merge list
	# display.
	for blocker in blockers:
		for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
			self._dynamic_config._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
				if atom is not None:
					# Fall back to the world atom that blocked removal.
					parent_atoms = set([("@world", atom)])
				conflict_pkgs[pkg] = parent_atoms

		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		for pkg, parent_atoms in conflict_pkgs.items():
			relevant_parent = False
			for parent, atom in parent_atoms:
				if parent not in conflict_pkgs:
					relevant_parent = True
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

	# Max number of parents shown, to avoid flooding the display.
	for pkg, parent_atoms in conflict_pkgs.items():

		# Prefer packages that are not directly involved in a conflict.
		for parent_atom in parent_atoms:
			if len(pruned_list) >= max_parents:
			parent, atom = parent_atom
			if parent not in conflict_pkgs:
				pruned_list.add(parent_atom)

		for parent_atom in parent_atoms:
			if len(pruned_list) >= max_parents:
			pruned_list.add(parent_atom)

		omitted_parents = len(parent_atoms) - len(pruned_list)
		msg.append(indent + "%s pulled in by\n" % pkg)

		for parent_atom in pruned_list:
			parent, atom = parent_atom
			msg.append(2*indent)
			if isinstance(parent,
				(PackageArg, AtomArg)):
				# For PackageArg and AtomArg types, it's
				# redundant to display the atom attribute.
				msg.append(str(parent))
				# Display the specific atom from SetArg or
				# Package types.
				msg.append("%s required by %s" % (atom, parent))

			msg.append(2*indent)
			msg.append("(and %d more)\n" % omitted_parents)

		sys.stderr.write("".join(msg))

	if "--quiet" not in self._frozen_config.myopts:
		show_blocker_docs_link()
3796 def display(self, mylist, favorites=[], verbosity=None):
3798 # This is used to prevent display_problems() from
3799 # redundantly displaying this exact same merge list
3800 # again via _show_merge_list().
3801 self._dynamic_config._displayed_list = mylist
3803 if verbosity is None:
3804 verbosity = ("--quiet" in self._frozen_config.myopts and 1 or \
3805 "--verbose" in self._frozen_config.myopts and 3 or 2)
3806 favorites_set = InternalPackageSet(favorites)
3807 oneshot = "--oneshot" in self._frozen_config.myopts or \
3808 "--onlydeps" in self._frozen_config.myopts
3809 columns = "--columns" in self._frozen_config.myopts
3814 counters = PackageCounters()
3816 if verbosity == 1 and "--verbose" not in self._frozen_config.myopts:
3817 def create_use_string(*args):
3820 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3822 is_new, reinst_flags,
3823 all_flags=(verbosity == 3 or "--quiet" in self._frozen_config.myopts),
3824 alphabetical=("--alphabetical" in self._frozen_config.myopts)):
3832 cur_iuse = set(cur_iuse)
3833 enabled_flags = cur_iuse.intersection(cur_use)
3834 removed_iuse = set(old_iuse).difference(cur_iuse)
3835 any_iuse = cur_iuse.union(old_iuse)
3836 any_iuse = list(any_iuse)
3838 for flag in any_iuse:
3841 reinst_flag = reinst_flags and flag in reinst_flags
3842 if flag in enabled_flags:
3844 if is_new or flag in old_use and \
3845 (all_flags or reinst_flag):
3846 flag_str = red(flag)
3847 elif flag not in old_iuse:
3848 flag_str = yellow(flag) + "%*"
3849 elif flag not in old_use:
3850 flag_str = green(flag) + "*"
3851 elif flag in removed_iuse:
3852 if all_flags or reinst_flag:
3853 flag_str = yellow("-" + flag) + "%"
3856 flag_str = "(" + flag_str + ")"
3857 removed.append(flag_str)
3860 if is_new or flag in old_iuse and \
3861 flag not in old_use and \
3862 (all_flags or reinst_flag):
3863 flag_str = blue("-" + flag)
3864 elif flag not in old_iuse:
3865 flag_str = yellow("-" + flag)
3866 if flag not in iuse_forced:
3868 elif flag in old_use:
3869 flag_str = green("-" + flag) + "*"
3871 if flag in iuse_forced:
3872 flag_str = "(" + flag_str + ")"
3874 enabled.append(flag_str)
3876 disabled.append(flag_str)
3879 ret = " ".join(enabled)
3881 ret = " ".join(enabled + disabled + removed)
3883 ret = '%s="%s" ' % (name, ret)
3886 repo_display = RepoDisplay(self._frozen_config.roots)
3890 mygraph = self._dynamic_config.digraph.copy()
3892 # If there are any Uninstall instances, add the corresponding
3893 # blockers to the digraph (useful for --tree display).
3895 executed_uninstalls = set(node for node in mylist \
3896 if isinstance(node, Package) and node.operation == "unmerge")
3898 for uninstall in self._dynamic_config._blocker_uninstalls.leaf_nodes():
3899 uninstall_parents = \
3900 self._dynamic_config._blocker_uninstalls.parent_nodes(uninstall)
3901 if not uninstall_parents:
3904 # Remove the corresponding "nomerge" node and substitute
3905 # the Uninstall node.
3906 inst_pkg = self._pkg(uninstall.cpv, "installed",
3907 uninstall.root_config, installed=True)
3910 mygraph.remove(inst_pkg)
3915 inst_pkg_blockers = self._dynamic_config._blocker_parents.child_nodes(inst_pkg)
3917 inst_pkg_blockers = []
3919 # Break the Package -> Uninstall edges.
3920 mygraph.remove(uninstall)
3922 # Resolution of a package's blockers
3923 # depend on it's own uninstallation.
3924 for blocker in inst_pkg_blockers:
3925 mygraph.add(uninstall, blocker)
3927 # Expand Package -> Uninstall edges into
3928 # Package -> Blocker -> Uninstall edges.
3929 for blocker in uninstall_parents:
3930 mygraph.add(uninstall, blocker)
3931 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3932 if parent != inst_pkg:
3933 mygraph.add(blocker, parent)
3935 # If the uninstall task did not need to be executed because
3936 # of an upgrade, display Blocker -> Upgrade edges since the
3937 # corresponding Blocker -> Uninstall edges will not be shown.
3939 self._dynamic_config._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
3940 if upgrade_node is not None and \
3941 uninstall not in executed_uninstalls:
3942 for blocker in uninstall_parents:
3943 mygraph.add(upgrade_node, blocker)
3945 unsatisfied_blockers = []
3950 if isinstance(x, Blocker) and not x.satisfied:
3951 unsatisfied_blockers.append(x)
3954 if "--tree" in self._frozen_config.myopts:
3955 depth = len(tree_nodes)
3956 while depth and graph_key not in \
3957 mygraph.child_nodes(tree_nodes[depth-1]):
3960 tree_nodes = tree_nodes[:depth]
3961 tree_nodes.append(graph_key)
3962 display_list.append((x, depth, True))
3963 shown_edges.add((graph_key, tree_nodes[depth-1]))
3965 traversed_nodes = set() # prevent endless circles
3966 traversed_nodes.add(graph_key)
3967 def add_parents(current_node, ordered):
3969 # Do not traverse to parents if this node is an
3970 # an argument or a direct member of a set that has
3971 # been specified as an argument (system or world).
3972 if current_node not in self._dynamic_config._set_nodes:
3973 parent_nodes = mygraph.parent_nodes(current_node)
3975 child_nodes = set(mygraph.child_nodes(current_node))
3976 selected_parent = None
3977 # First, try to avoid a direct cycle.
3978 for node in parent_nodes:
3979 if not isinstance(node, (Blocker, Package)):
3981 if node not in traversed_nodes and \
3982 node not in child_nodes:
3983 edge = (current_node, node)
3984 if edge in shown_edges:
3986 selected_parent = node
3988 if not selected_parent:
3989 # A direct cycle is unavoidable.
3990 for node in parent_nodes:
3991 if not isinstance(node, (Blocker, Package)):
3993 if node not in traversed_nodes:
3994 edge = (current_node, node)
3995 if edge in shown_edges:
3997 selected_parent = node
4000 shown_edges.add((current_node, selected_parent))
4001 traversed_nodes.add(selected_parent)
4002 add_parents(selected_parent, False)
4003 display_list.append((current_node,
4004 len(tree_nodes), ordered))
4005 tree_nodes.append(current_node)
4007 add_parents(graph_key, True)
4009 display_list.append((x, depth, True))
4010 mylist = display_list
4011 for x in unsatisfied_blockers:
4012 mylist.append((x, 0, True))
4014 last_merge_depth = 0
4015 for i in range(len(mylist)-1,-1,-1):
4016 graph_key, depth, ordered = mylist[i]
4017 if not ordered and depth == 0 and i > 0 \
4018 and graph_key == mylist[i-1][0] and \
4019 mylist[i-1][1] == 0:
4020 # An ordered node got a consecutive duplicate when the tree was
4024 if ordered and graph_key[-1] != "nomerge":
4025 last_merge_depth = depth
4027 if depth >= last_merge_depth or \
4028 i < len(mylist) - 1 and \
4029 depth >= mylist[i+1][1]:
4032 # files to fetch list - avoids counting a same file twice
4033 # in size display (verbose mode)
4036 # Use this set to detect when all the "repoadd" strings are "[0]"
4037 # and disable the entire repo display in this case.
4040 for mylist_index in range(len(mylist)):
4041 x, depth, ordered = mylist[mylist_index]
4045 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4046 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
4047 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4048 vartree = self._frozen_config.trees[myroot]["vartree"]
4049 pkgsettings = self._frozen_config.pkgsettings[myroot]
4052 indent = " " * depth
4054 if isinstance(x, Blocker):
4056 blocker_style = "PKG_BLOCKER_SATISFIED"
4057 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4059 blocker_style = "PKG_BLOCKER"
4060 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4062 counters.blocks += 1
4064 counters.blocks_satisfied += 1
4065 resolved = portage.dep_expand(
4066 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4067 if "--columns" in self._frozen_config.myopts and "--quiet" in self._frozen_config.myopts:
4068 addl += " " + colorize(blocker_style, str(resolved))
4070 addl = "[%s %s] %s%s" % \
4071 (colorize(blocker_style, "blocks"),
4072 addl, indent, colorize(blocker_style, str(resolved)))
4073 block_parents = self._dynamic_config._blocker_parents.parent_nodes(x)
4074 block_parents = set([pnode[2] for pnode in block_parents])
4075 block_parents = ", ".join(block_parents)
4077 addl += colorize(blocker_style,
4078 " (\"%s\" is blocking %s)") % \
4079 (str(x.atom).lstrip("!"), block_parents)
4081 addl += colorize(blocker_style,
4082 " (is blocking %s)") % block_parents
4083 if isinstance(x, Blocker) and x.satisfied:
4088 blockers.append(addl)
4091 pkg_merge = ordered and pkg_status == "merge"
4092 if not pkg_merge and pkg_status == "merge":
4093 pkg_status = "nomerge"
4094 built = pkg_type != "ebuild"
4095 installed = pkg_type == "installed"
4097 metadata = pkg.metadata
4099 repo_name = metadata["repository"]
4100 if pkg.type_name == "ebuild":
4101 ebuild_path = portdb.findname(pkg.cpv)
4102 if ebuild_path is None:
4103 raise AssertionError(
4104 "ebuild not found for '%s'" % pkg.cpv)
4105 repo_path_real = os.path.dirname(os.path.dirname(
4106 os.path.dirname(ebuild_path)))
4108 repo_path_real = portdb.getRepositoryPath(repo_name)
4109 pkg_use = list(pkg.use.enabled)
4110 if not pkg.built and pkg.operation == 'merge' and \
4111 'fetch' in pkg.metadata.restrict:
4114 counters.restrict_fetch += 1
4115 if portdb.fetch_check(pkg_key, pkg_use):
4118 counters.restrict_fetch_satisfied += 1
4120 #we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
4121 #param is used for -u, where you still *do* want to see when something is being upgraded.
4124 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4125 if vardb.cpv_exists(pkg_key):
4126 addl=" "+yellow("R")+fetch+" "
4129 counters.reinst += 1
4130 elif pkg_status == "uninstall":
4131 counters.uninst += 1
4132 # filter out old-style virtual matches
4133 elif installed_versions and \
4134 portage.cpv_getkey(installed_versions[0]) == \
4135 portage.cpv_getkey(pkg_key):
4136 myinslotlist = vardb.match(pkg.slot_atom)
4137 # If this is the first install of a new-style virtual, we
4138 # need to filter out old-style virtual matches.
4139 if myinslotlist and \
4140 portage.cpv_getkey(myinslotlist[0]) != \
4141 portage.cpv_getkey(pkg_key):
4144 myoldbest = myinslotlist[:]
4146 if not portage.dep.cpvequal(pkg_key,
4147 portage.best([pkg_key] + myoldbest)):
4149 addl += turquoise("U")+blue("D")
4151 counters.downgrades += 1
4154 addl += turquoise("U") + " "
4156 counters.upgrades += 1
4158 # New slot, mark it new.
4159 addl = " " + green("NS") + fetch + " "
4160 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4162 counters.newslot += 1
4164 if "--changelog" in self._frozen_config.myopts:
4165 inst_matches = vardb.match(pkg.slot_atom)
4167 ebuild_path_cl = ebuild_path
4168 if ebuild_path_cl is None:
4170 ebuild_path_cl = portdb.findname(pkg.cpv)
4172 if ebuild_path_cl is not None:
4173 changelogs.extend(calc_changelog(
4174 ebuild_path_cl, inst_matches[0], pkg.cpv))
4176 addl = " " + green("N") + " " + fetch + " "
4185 forced_flags = set()
4186 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4187 forced_flags.update(pkgsettings.useforce)
4188 forced_flags.update(pkgsettings.usemask)
4190 cur_use = [flag for flag in pkg.use.enabled \
4191 if flag in pkg.iuse.all]
4192 cur_iuse = sorted(pkg.iuse.all)
4194 if myoldbest and myinslotlist:
4195 previous_cpv = myoldbest[0]
4197 previous_cpv = pkg.cpv
4198 if vardb.cpv_exists(previous_cpv):
4199 old_iuse, old_use = vardb.aux_get(
4200 previous_cpv, ["IUSE", "USE"])
4201 old_iuse = list(set(
4202 filter_iuse_defaults(old_iuse.split())))
4204 old_use = old_use.split()
4211 old_use = [flag for flag in old_use if flag in old_iuse]
4213 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4215 use_expand.reverse()
4216 use_expand_hidden = \
4217 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
4219 def map_to_use_expand(myvals, forcedFlags=False,
4223 for exp in use_expand:
4226 for val in myvals[:]:
4227 if val.startswith(exp.lower()+"_"):
4228 if val in forced_flags:
4229 forced[exp].add(val[len(exp)+1:])
4230 ret[exp].append(val[len(exp)+1:])
4233 forced["USE"] = [val for val in myvals \
4234 if val in forced_flags]
4236 for exp in use_expand_hidden:
4242 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4243 # are the only thing that triggered reinstallation.
4244 reinst_flags_map = {}
4245 reinstall_for_flags = self._dynamic_config._reinstall_nodes.get(pkg)
4246 reinst_expand_map = None
4247 if reinstall_for_flags:
4248 reinst_flags_map = map_to_use_expand(
4249 list(reinstall_for_flags), removeHidden=False)
4250 for k in list(reinst_flags_map):
4251 if not reinst_flags_map[k]:
4252 del reinst_flags_map[k]
4253 if not reinst_flags_map.get("USE"):
4254 reinst_expand_map = reinst_flags_map.copy()
4255 reinst_expand_map.pop("USE", None)
4256 if reinst_expand_map and \
4257 not set(reinst_expand_map).difference(
4259 use_expand_hidden = \
4260 set(use_expand_hidden).difference(
4263 cur_iuse_map, iuse_forced = \
4264 map_to_use_expand(cur_iuse, forcedFlags=True)
4265 cur_use_map = map_to_use_expand(cur_use)
4266 old_iuse_map = map_to_use_expand(old_iuse)
4267 old_use_map = map_to_use_expand(old_use)
4270 use_expand.insert(0, "USE")
4272 for key in use_expand:
4273 if key in use_expand_hidden:
4275 verboseadd += create_use_string(key.upper(),
4276 cur_iuse_map[key], iuse_forced[key],
4277 cur_use_map[key], old_iuse_map[key],
4278 old_use_map[key], is_new,
4279 reinst_flags_map.get(key))
4284 if pkg_type == "ebuild" and pkg_merge:
4286 myfilesdict = portdb.getfetchsizes(pkg_key,
4287 useflags=pkg_use, debug=self._frozen_config.edebug)
4288 except portage.exception.InvalidDependString as e:
4289 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4290 show_invalid_depstring_notice(x, src_uri, str(e))
4293 if myfilesdict is None:
4294 myfilesdict="[empty/missing/bad digest]"
4296 for myfetchfile in myfilesdict:
4297 if myfetchfile not in myfetchlist:
4298 mysize+=myfilesdict[myfetchfile]
4299 myfetchlist.append(myfetchfile)
4301 counters.totalsize += mysize
4302 verboseadd += format_size(mysize)
4305 # assign index for a previous version in the same slot
4306 has_previous = False
4307 repo_name_prev = None
4308 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4310 slot_matches = vardb.match(slot_atom)
4313 repo_name_prev = vardb.aux_get(slot_matches[0],
4316 # now use the data to generate output
4317 if pkg.installed or not has_previous:
4318 repoadd = repo_display.repoStr(repo_path_real)
4320 repo_path_prev = None
4322 repo_path_prev = portdb.getRepositoryPath(
4324 if repo_path_prev == repo_path_real:
4325 repoadd = repo_display.repoStr(repo_path_real)
4327 repoadd = "%s=>%s" % (
4328 repo_display.repoStr(repo_path_prev),
4329 repo_display.repoStr(repo_path_real))
4331 repoadd_set.add(repoadd)
4333 xs = [portage.cpv_getkey(pkg_key)] + \
4334 list(portage.catpkgsplit(pkg_key)[2:])
4341 if "COLUMNWIDTH" in self._frozen_config.settings:
4343 mywidth = int(self._frozen_config.settings["COLUMNWIDTH"])
4344 except ValueError as e:
4345 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4347 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4348 self._frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
4350 oldlp = mywidth - 30
4353 # Convert myoldbest from a list to a string.
4357 for pos, key in enumerate(myoldbest):
4358 key = portage.catpkgsplit(key)[2] + \
4359 "-" + portage.catpkgsplit(key)[3]
4360 if key[-3:] == "-r0":
4362 myoldbest[pos] = key
4363 myoldbest = blue("["+", ".join(myoldbest)+"]")
4366 root_config = self._frozen_config.roots[myroot]
4367 system_set = root_config.sets["system"]
4368 world_set = root_config.sets["world"]
4373 pkg_system = system_set.findAtomForPackage(pkg)
4374 pkg_world = world_set.findAtomForPackage(pkg)
4375 if not (oneshot or pkg_world) and \
4376 myroot == self._frozen_config.target_root and \
4377 favorites_set.findAtomForPackage(pkg):
4378 # Maybe it will be added to world now.
4379 if create_world_atom(pkg, favorites_set, root_config):
4381 except portage.exception.InvalidDependString:
4382 # This is reported elsewhere if relevant.
4385 def pkgprint(pkg_str):
4388 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4390 return colorize("PKG_MERGE_WORLD", pkg_str)
4392 return colorize("PKG_MERGE", pkg_str)
4393 elif pkg_status == "uninstall":
4394 return colorize("PKG_UNINSTALL", pkg_str)
4397 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4399 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4401 return colorize("PKG_NOMERGE", pkg_str)
4403 if 'interactive' in pkg.metadata.properties and \
4404 pkg.operation == 'merge':
4405 addl = colorize("WARN", "I") + addl[1:]
4407 counters.interactive += 1
4412 if "--columns" in self._frozen_config.myopts:
4413 if "--quiet" in self._frozen_config.myopts:
4414 myprint=addl+" "+indent+pkgprint(pkg_cp)
4415 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4416 myprint=myprint+myoldbest
4417 myprint=myprint+darkgreen("to "+x[1])
4421 myprint = "[%s] %s%s" % \
4422 (pkgprint(pkg_status.ljust(13)),
4423 indent, pkgprint(pkg.cp))
4425 myprint = "[%s %s] %s%s" % \
4426 (pkgprint(pkg.type_name), addl,
4427 indent, pkgprint(pkg.cp))
4428 if (newlp-nc_len(myprint)) > 0:
4429 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4430 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4431 if (oldlp-nc_len(myprint)) > 0:
4432 myprint=myprint+" "*(oldlp-nc_len(myprint))
4433 myprint=myprint+myoldbest
4434 myprint += darkgreen("to " + pkg.root)
4437 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4439 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4440 myprint += indent + pkgprint(pkg_key) + " " + \
4441 myoldbest + darkgreen("to " + myroot)
4443 if "--columns" in self._frozen_config.myopts:
4444 if "--quiet" in self._frozen_config.myopts:
4445 myprint=addl+" "+indent+pkgprint(pkg_cp)
4446 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4447 myprint=myprint+myoldbest
4451 myprint = "[%s] %s%s" % \
4452 (pkgprint(pkg_status.ljust(13)),
4453 indent, pkgprint(pkg.cp))
4455 myprint = "[%s %s] %s%s" % \
4456 (pkgprint(pkg.type_name), addl,
4457 indent, pkgprint(pkg.cp))
4458 if (newlp-nc_len(myprint)) > 0:
4459 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4460 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4461 if (oldlp-nc_len(myprint)) > 0:
4462 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4463 myprint += myoldbest
4466 myprint = "[%s] %s%s %s" % \
4467 (pkgprint(pkg_status.ljust(13)),
4468 indent, pkgprint(pkg.cpv),
4471 myprint = "[%s %s] %s%s %s" % \
4472 (pkgprint(pkg_type), addl, indent,
4473 pkgprint(pkg.cpv), myoldbest)
4475 if columns and pkg.operation == "uninstall":
4477 p.append((myprint, verboseadd, repoadd))
4479 if "--tree" not in self._frozen_config.myopts and \
4480 "--quiet" not in self._frozen_config.myopts and \
4481 not self._frozen_config._opts_no_restart.intersection(self._frozen_config.myopts) and \
4482 pkg.root == self._frozen_config._running_root.root and \
4483 portage.match_from_list(
4484 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4485 not vardb.cpv_exists(pkg.cpv) and \
4486 "--quiet" not in self._frozen_config.myopts:
4487 if mylist_index < len(mylist) - 1:
4488 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4489 p.append(colorize("WARN", " then resume the merge."))
4492 show_repos = repoadd_set and repoadd_set != set(["0"])
4495 if isinstance(x, basestring):
4496 out.write("%s\n" % (x,))
4499 myprint, verboseadd, repoadd = x
4502 myprint += " " + verboseadd
4504 if show_repos and repoadd:
4505 myprint += " " + teal("[%s]" % repoadd)
4507 out.write("%s\n" % (myprint,))
4516 # In python-2.x, str() can trigger a UnicodeEncodeError here,
4517 # so call __str__() directly.
4518 writemsg_stdout(repo_display.__str__(), noiselevel=-1)
4520 if "--changelog" in self._frozen_config.myopts:
4521 writemsg_stdout('\n', noiselevel=-1)
4522 for revision,text in changelogs:
4523 writemsg_stdout(bold('*'+revision) + '\n' + text,
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """

    # Note that show_masked_packages() sends its output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    stdout = sys.stdout
    try:
        sys.stdout = sys.stderr
        self._display_problems()
    finally:
        # Always undo the redirect, even when _display_problems()
        # raises, so the process-wide stdout is never left pointing
        # at stderr.
        sys.stdout = stdout

    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
    """
    Report the problem categories recorded on the dynamic config:
    circular deps, unsatisfied blockers or slot collisions, missed
    updates, missing arguments, package.provided conflicts, and
    masked installed packages. All output goes to sys.stderr.
    """
    if self._dynamic_config._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._dynamic_config._circular_deps_for_display)

    # The user is only notified of a slot conflict if
    # there are no unresolvable blocker conflicts.
    if self._dynamic_config._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._dynamic_config._unsatisfied_blockers_for_display)
    elif self._dynamic_config._slot_collision_info:
        self._show_slot_collision_notice()
    else:
        self._show_missed_update()

    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.

    if self._dynamic_config._missing_args:
        world_problems = False
        if "world" in self._dynamic_config._sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["world"]
            for arg, atom in self._dynamic_config._missing_args:
                if arg.name == "world" and atom in world_set:
                    world_problems = True
                    break

        if world_problems:
            sys.stderr.write("\n!!! Problems have been " + \
                "detected with your world file\n")
            sys.stderr.write("!!! Please run " + \
                green("emaint --check world")+"\n\n")

    if self._dynamic_config._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._dynamic_config._missing_args) + "\n")

    if self._dynamic_config._pprovided_args:
        # Map (arg, atom) -> list of referring set names so each
        # offending entry is reported once with all its referrers.
        arg_refs = {}
        for arg, atom in self._dynamic_config._pprovided_args:
            if isinstance(arg, SetArg):
                parent = arg.name
                arg_atom = (atom, atom)
            else:
                # Non-set args are keyed by their literal argument string.
                parent = str(arg)
                arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:
                refs.append(parent)
        msg = []
        msg.append(bad("\nWARNING: "))
        if len(self._dynamic_config._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
        else:
            msg.append("A requested package will not be " + \
                "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.items():
            ref_string = ""
            if refs:
                problems_sets.update(refs)
                refs.sort()
                ref_string = ", ".join(["'%s'" % name for name in refs])
                ref_string = " pulled in by " + ref_string
            msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
        msg.append("\n")
        if "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append(" B) Uninstall offending packages (cleans them from world).\n")
            msg.append(" C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))

    masked_packages = []
    for pkg in self._dynamic_config._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.metadata, mreasons))
    if masked_packages:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " The following installed packages are masked:\n")
        show_masked_packages(masked_packages)
def saveNomergeFavorites(self):
    """Find atoms in favorites that are not in the mergelist and add them
    to the world file if necessary."""
    for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
        "--oneshot", "--onlydeps", "--pretend"):
        # These options all imply that the world file must not be
        # modified.
        if x in self._frozen_config.myopts:
            return
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    world_set = root_config.sets["world"]

    world_locked = False
    if hasattr(world_set, "lock"):
        world_set.lock()
        world_locked = True

    try:
        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        args_set = self._dynamic_config._sets["args"]
        portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
        added_favorites = set()
        for x in self._dynamic_config._set_nodes:
            pkg_type, root, pkg_key, pkg_status = x
            if pkg_status != "nomerge":
                continue

            try:
                myfavkey = create_world_atom(x, args_set, root_config)
                if myfavkey:
                    if myfavkey in added_favorites:
                        continue
                    added_favorites.add(myfavkey)
            except portage.exception.InvalidDependString as e:
                writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                    (pkg_key, str(e)), noiselevel=-1)
                writemsg("!!! see '%s'\n\n" % os.path.join(
                    root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)

        all_added = []
        for k in self._dynamic_config._sets:
            if k in ("args", "world") or not root_config.sets[k].world_candidate:
                continue
            s = SETPREFIX + k
            if s in world_set:
                continue
            all_added.append(SETPREFIX + k)
        all_added.extend(added_favorites)
        all_added.sort()
        for a in all_added:
            print(">>> Recording %s in \"world\" favorites file..." % \
                colorize("INFORM", str(a)))
        if all_added:
            world_set.update(all_added)
    finally:
        # Release the world file lock even if recording fails.
        if world_locked:
            world_set.unlock()
def _loadResumeCommand(self, resume_data, skip_masked=True,
    skip_missing=True):
    """
    Add a resume command to the graph and validate it in the process. This
    will raise a PackageNotFound exception if a package is not available.

    @param resume_data: the "resume" dict saved by a previous merge;
        expected to contain "mergelist" and optionally "favorites"
    @param skip_masked: when True, collect masked mergelist packages and
        report them via an UnsatisfiedResumeDep exception instead of
        queuing them for the unsatisfied-dep display
    @param skip_missing: when True, silently drop mergelist entries whose
        packages no longer exist (or are corrupt)
    @rtype: bool
    @return: True on success, False if the resume list is unusable
    """

    if not isinstance(resume_data, dict):
        return False

    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
        mergelist = []

    fakedb = self._dynamic_config.mydbapi
    trees = self._frozen_config.trees
    serialized_tasks = []
    masked_tasks = []
    for x in mergelist:
        # Each entry must be a [pkg_type, root, cpv, action] quadruple.
        if not (isinstance(x, list) and len(x) == 4):
            continue
        pkg_type, myroot, pkg_key, action = x
        if pkg_type not in self.pkg_tree_map:
            continue
        if action != "merge":
            continue
        root_config = self._frozen_config.roots[myroot]
        try:
            pkg = self._pkg(pkg_key, pkg_type, root_config)
        except portage.exception.PackageNotFound:
            # It does not exist or it is corrupt.
            if skip_missing:
                # TODO: log these somewhere
                continue
            raise

        if "merge" == pkg.operation and \
            not visible(root_config.settings, pkg):
            if skip_masked:
                masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
            else:
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((pkg.root, "="+pkg.cpv), {"myparent":None}))

        fakedb[myroot].cpv_inject(pkg)
        serialized_tasks.append(pkg)
        self._spinner_update()

    if self._dynamic_config._unsatisfied_deps_for_display:
        return False

    if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
        self._dynamic_config._serialized_tasks_cache = serialized_tasks
        self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
    else:
        self._select_package = self._select_pkg_from_graph
        self._dynamic_config.myparams["selective"] = True
        # Always traverse deep dependencies in order to account for
        # potentially unsatisfied dependencies of installed packages.
        # This is necessary for correct --keep-going or --resume operation
        # in case a package from a group of circularly dependent packages
        # fails. In this case, a package which has recently been installed
        # may have an unsatisfied circular dependency (pulled in by
        # PDEPEND, for example). So, even though a package is already
        # installed, it may not have all of its dependencies satisfied, so
        # it may not be usable. If such a package is in the subgraph of
        # deep dependencies of a scheduled build, that build needs to
        # be cancelled. In order for this type of situation to be
        # recognized, deep traversal of dependencies is required.
        self._dynamic_config.myparams["deep"] = True

        favorites = resume_data.get("favorites")
        args_set = self._dynamic_config._sets["args"]
        if isinstance(favorites, list):
            args = self._load_favorites(favorites)
        else:
            args = []

        for task in serialized_tasks:
            if isinstance(task, Package) and \
                task.operation == "merge":
                if not self._add_pkg(task, None):
                    return False

        # Packages for argument atoms need to be explicitly
        # added via _add_pkg() so that they are included in the
        # digraph (needed at least for --tree display).
        for arg in args:
            for atom in arg.set:
                pkg, existing_node = self._select_package(
                    arg.root_config.root, atom)
                if existing_node is None and \
                    pkg is not None:
                    if not self._add_pkg(pkg, Dependency(atom=atom,
                        root=pkg.root, parent=arg)):
                        return False

        # Allow unsatisfied deps here to avoid showing a masking
        # message for an unsatisfied dep that isn't necessarily
        # masked.
        if not self._create_graph(allow_unsatisfied=True):
            return False

        unsatisfied_deps = []
        for dep in self._dynamic_config._unsatisfied_deps:
            if not isinstance(dep.parent, Package):
                continue
            if dep.parent.operation == "merge":
                unsatisfied_deps.append(dep)
                continue

            # For unsatisfied deps of installed packages, only account for
            # them if they are in the subgraph of dependencies of a package
            # which is scheduled to be installed.
            unsatisfied_install = False
            traversed = set()
            dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
            while dep_stack:
                node = dep_stack.pop()
                if not isinstance(node, Package):
                    continue
                if node.operation == "merge":
                    unsatisfied_install = True
                    break
                if node in traversed:
                    continue
                traversed.add(node)
                dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

            if unsatisfied_install:
                unsatisfied_deps.append(dep)

        if masked_tasks or unsatisfied_deps:
            # This probably means that a required package
            # was dropped via --skipfirst. It makes the
            # resume list invalid, so convert it to a
            # UnsatisfiedResumeDep exception.
            raise self.UnsatisfiedResumeDep(self,
                masked_tasks + unsatisfied_deps)
        self._dynamic_config._serialized_tasks_cache = None
        try:
            self._serialize_tasks()
        except self._unknown_internal_error:
            return False

    return True
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.

    @rtype: list
    @return: the SetArg/AtomArg instances created from favorites
    """
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    getSetAtoms = root_config.setconfig.getSetAtoms
    sets = root_config.sets
    args = []
    for x in favorites:
        if not isinstance(x, basestring):
            continue
        if x in ("system", "world"):
            # Normalize bare set names to their prefixed form.
            x = SETPREFIX + x
        if x.startswith(SETPREFIX):
            s = x[len(SETPREFIX):]
            if s not in sets:
                continue
            if s in self._dynamic_config._sets:
                continue
            # Recursively expand sets so that containment tests in
            # self._get_parent_sets() properly match atoms in nested
            # sets (like if world contains system).
            expanded_set = InternalPackageSet(
                initial_atoms=getSetAtoms(s))
            self._dynamic_config._sets[s] = expanded_set
            args.append(SetArg(arg=x, set=expanded_set,
                root_config=root_config))
        else:
            try:
                x = Atom(x)
            except portage.exception.InvalidAtom:
                continue
            args.append(AtomArg(arg=x, atom=x,
                root_config=root_config))

    self._set_args(args)
    return args
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        portage.exception.PortageException.__init__(self, value)
        # Keep a reference to the resolver so whoever catches this
        # exception can inspect the failed depgraph state.
        self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
    """
    Base class for exceptions raised internally by the depgraph
    (see the subclasses below); not intended to escape to callers.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr, unfortunately, the exact reason for the failure
    is not attached to the exception itself.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
def need_restart(self):
    """Return the resolver's internal _need_restart flag."""
    dynamic_config = self._dynamic_config
    return dynamic_config._need_restart
def get_runtime_pkg_mask(self):
    """Return a shallow copy of the runtime package mask, so callers
    can inspect or modify it without touching resolver state."""
    runtime_mask = self._dynamic_config._runtime_pkg_mask
    return runtime_mask.copy()
4944 class _dep_check_composite_db(portage.dbapi):
4946 A dbapi-like interface that is optimized for use in dep_check() calls.
4947 This is built on top of the existing depgraph package selection logic.
4948 Some packages that have been added to the graph may be masked from this
4949 view in order to influence the atom preference selection that occurs
4952 def __init__(self, depgraph, root):
4953 portage.dbapi.__init__(self)
4954 self._depgraph = depgraph
4956 self._match_cache = {}
4957 self._cpv_pkg_map = {}
4959 def _clear_cache(self):
4960 self._match_cache.clear()
4961 self._cpv_pkg_map.clear()
4963 def match(self, atom):
4964 ret = self._match_cache.get(atom)
4969 atom = self._dep_expand(atom)
4970 pkg, existing = self._depgraph._select_package(self._root, atom)
4974 # Return the highest available from select_package() as well as
4975 # any matching slots in the graph db.
4977 slots.add(pkg.metadata["SLOT"])
4978 if pkg.cp.startswith("virtual/"):
4979 # For new-style virtual lookahead that occurs inside
4980 # dep_check(), examine all slots. This is needed
4981 # so that newer slots will not unnecessarily be pulled in
4982 # when a satisfying lower slot is already installed. For
4983 # example, if virtual/jdk-1.4 is satisfied via kaffe then
4984 # there's no need to pull in a newer slot to satisfy a
4985 # virtual/jdk dependency.
4986 for db, pkg_type, built, installed, db_keys in \
4987 self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
4988 for cpv in db.match(atom):
4989 if portage.cpv_getkey(cpv) != pkg.cp:
4991 slots.add(db.aux_get(cpv, ["SLOT"])[0])
4993 if self._visible(pkg):
4994 self._cpv_pkg_map[pkg.cpv] = pkg
4996 slots.remove(pkg.metadata["SLOT"])
4998 slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
4999 pkg, existing = self._depgraph._select_package(
5000 self._root, slot_atom)
5003 if not self._visible(pkg):
5005 self._cpv_pkg_map[pkg.cpv] = pkg
5008 self._cpv_sort_ascending(ret)
5009 self._match_cache[orig_atom] = ret
5012 def _visible(self, pkg):
5013 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
5015 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
5016 except (StopIteration, portage.exception.InvalidDependString):
5023 self._depgraph._frozen_config.pkgsettings[pkg.root], pkg):
5025 except portage.exception.InvalidDependString:
5027 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
5028 self._root].get(pkg.slot_atom)
5029 if in_graph is None:
5030 # Mask choices for packages which are not the highest visible
5031 # version within their slot (since they usually trigger slot
5033 highest_visible, in_graph = self._depgraph._select_package(
5034 self._root, pkg.slot_atom)
5035 if pkg != highest_visible:
5037 elif in_graph != pkg:
5038 # Mask choices for packages that would trigger a slot
5039 # conflict with a previously selected package.
5043 def _dep_expand(self, atom):
# Expand an atom that lacks a category into a fully-qualified one.
# NOTE(review): the docstring delimiters and a few surrounding lines
# are elided from this excerpt.
5045 This is only needed for old installed packages that may
5046 contain atoms that are not fully qualified with a specific
5047 category. Emulate the cpv_expand() function that's used by
5048 dbapi.match() in cases like this. If there are multiple
5049 matches, it's often due to a new-style virtual that has
5050 been added, so try to filter those out to avoid raising
5053 root_config = self._depgraph.roots[self._root]
5055 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5056 if len(expanded_atoms) > 1:
# Ambiguous expansion: prefer the non-virtual candidate when there is
# exactly one, since new-style virtuals commonly shadow real packages.
5057 non_virtual_atoms = []
5058 for x in expanded_atoms:
5059 if not portage.dep_getkey(x).startswith("virtual/"):
5060 non_virtual_atoms.append(x)
5061 if len(non_virtual_atoms) == 1:
5062 expanded_atoms = non_virtual_atoms
5063 if len(expanded_atoms) > 1:
5064 # compatible with portage.cpv_expand()
5065 raise portage.exception.AmbiguousPackageName(
5066 [portage.dep_getkey(x) for x in expanded_atoms])
5068 atom = expanded_atoms[0]
# No match at all: re-key the atom under the dummy "null" category and
# check whether the bare package name is a known PROVIDE virtual.
5070 null_atom = Atom(insert_category_into_atom(atom, "null"))
5071 cat, atom_pn = portage.catsplit(null_atom.cp)
5072 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5074 # Allow the resolver to choose which virtual.
5075 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
def aux_get(self, cpv, wants):
	"""Return the metadata values named in wants for cpv.

	Keys absent from the package's metadata yield an empty string.
	"""
	metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(metadata.get(key, ""))
	return values
def match_pkgs(self, atom):
	"""Like match(), but map each matching cpv to its Package instance."""
	pkgs = []
	for cpv in self.match(atom):
		pkgs.append(self._cpv_pkg_map[cpv])
	return pkgs
5087 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
# Tell the user that the short name `arg` matched several categories.
# In --quiet mode just list the fully-qualified names; otherwise run a
# search so each candidate is shown with its description.
# NOTE(review): some lines are elided from this excerpt (the end of
# the quiet branch, the argument line feeding null_cp, and the search
# output call inside the loop).
5089 if "--quiet" in myopts:
5090 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5091 print("!!! one of the following fully-qualified ebuild names instead:\n")
5092 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5093 print(" " + colorize("INFORM", cp))
5096 s = search(root_config, spinner, "--searchdesc" in myopts,
5097 "--quiet" not in myopts, "--usepkg" in myopts,
5098 "--usepkgonly" in myopts)
# Key the search on the bare package name, with the ambiguous
# category stripped off.
5099 null_cp = portage.dep_getkey(insert_category_into_atom(
5101 cat, atom_pn = portage.catsplit(null_cp)
5102 s.searchkey = atom_pn
5103 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5106 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5107 print("!!! one of the above fully-qualified ebuild names instead.\n")
5109 def insert_category_into_atom(atom, category):
# Insert "category/" in front of the package-name part of atom, i.e.
# just before the first word character (skipping any leading version
# operator such as ">=" or "~").
5110 alphanum = re.search(r'\w', atom)
# NOTE(review): the guard for a failed search (alphanum is None) and
# the return statement are elided from this excerpt — confirm the
# original handles the no-match case before relying on it.
5112 ret = atom[:alphanum.start()] + "%s/" % category + \
5113 atom[alphanum.start():]
5118 def backtrack_depgraph(settings, trees, myopts, myparams,
5119 myaction, myfiles, spinner):
# Repeatedly build a depgraph, masking the packages that triggered a
# restart on the previous attempt, until the calculation succeeds or
# the backtrack limit is reached. Returns (success, mydepgraph,
# favorites). NOTE(review): the retry loop header and the
# backtracked/backtrack_max initialization are elided from this
# excerpt.
5121 Raises PackageSetNotFound if myfiles contains a missing package set.
5124 runtime_pkg_mask = None
5125 allow_backtracking = True
5127 frozen_config = _frozen_depgraph_config(settings, trees,
5130 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5131 frozen_config=frozen_config,
5132 allow_backtracking=allow_backtracking,
5133 runtime_pkg_mask=runtime_pkg_mask)
5134 success, favorites = mydepgraph.select_files(myfiles)
5136 if mydepgraph.need_restart() and backtracked < backtrack_max:
# Retry with the conflicting packages masked out of the next run.
5137 runtime_pkg_mask = mydepgraph.get_runtime_pkg_mask()
5139 elif backtracked and allow_backtracking:
5140 if "--debug" in myopts:
5142 "\n\nbacktracking aborted after %s tries\n\n" % \
5143 backtracked, noiselevel=-1, level=logging.DEBUG)
5144 # Backtracking failed, so disable it and do
5145 # a plain dep calculation + error message.
5146 allow_backtracking = False
5147 runtime_pkg_mask = None
5152 return (success, mydepgraph, favorites)
5154 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# NOTE(review): several lines are elided from this excerpt (the retry
# loop header, try:, skip_masked assignment, and continue/raise
# statements); comments below cover only the visible code.
5156 Construct a depgraph for the given resume list. This will raise
5157 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
5159 @returns: (success, depgraph, dropped_tasks)
5162 skip_unsatisfied = True
5163 mergelist = mtimedb["resume"]["mergelist"]
5164 dropped_tasks = set()
5165 frozen_config = _frozen_depgraph_config(settings, trees,
5168 mydepgraph = depgraph(settings, trees,
5169 myopts, myparams, spinner, frozen_config=frozen_config)
5171 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
5172 skip_masked=skip_masked)
5173 except depgraph.UnsatisfiedResumeDep as e:
5174 if not skip_unsatisfied:
# When dropping unsatisfied tasks is not allowed, the failure is
# propagated (elided here).
5177 graph = mydepgraph._dynamic_config.digraph
# Collect every package whose dependency became unsatisfied, then walk
# up the graph: dropping a child can invalidate its parents in turn.
5178 unsatisfied_parents = dict((dep.parent, dep.parent) \
5180 traversed_nodes = set()
5181 unsatisfied_stack = list(unsatisfied_parents)
5182 while unsatisfied_stack:
5183 pkg = unsatisfied_stack.pop()
5184 if pkg in traversed_nodes:
5186 traversed_nodes.add(pkg)
5188 # If this package was pulled in by a parent
5189 # package scheduled for merge, removing this
5190 # package may cause the parent package's
5191 # dependency to become unsatisfied.
5192 for parent_node in graph.parent_nodes(pkg):
5193 if not isinstance(parent_node, Package) \
5194 or parent_node.operation not in ("merge", "nomerge"):
5197 graph.child_nodes(parent_node,
5198 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5199 if pkg in unsatisfied:
5200 unsatisfied_parents[parent_node] = parent_node
5201 unsatisfied_stack.append(parent_node)
# Rebuild the resume mergelist without the unsatisfied packages.
5203 pruned_mergelist = []
5205 if isinstance(x, list) and \
5206 tuple(x) not in unsatisfied_parents:
5207 pruned_mergelist.append(x)
5209 # If the mergelist doesn't shrink then this loop is infinite.
5210 if len(pruned_mergelist) == len(mergelist):
5211 # This happens if a package can't be dropped because
5212 # it's already installed, but it has unsatisfied PDEPEND.
5214 mergelist[:] = pruned_mergelist
5216 # Exclude installed packages that have been removed from the graph due
5217 # to failure to build/install runtime dependencies after the dependent
5218 # package has already been installed.
5219 dropped_tasks.update(pkg for pkg in \
5220 unsatisfied_parents if pkg.operation != "nomerge")
5221 mydepgraph.break_refs(unsatisfied_parents)
# Drop references to the failed graph before the next attempt so the
# memory can be reclaimed.
5223 del e, graph, traversed_nodes, \
5224 unsatisfied_parents, unsatisfied_stack
5228 return (success, mydepgraph, dropped_tasks)
5230 def get_mask_info(root_config, cpv, pkgsettings,
5231 db, pkg_type, built, installed, db_keys):
# Fetch cpv's metadata from db and compute why it is masked.
# Returns (metadata, mreasons). NOTE(review): the try/except around
# aux_get (which presumably sets metadata to None on failure) and the
# else/return branches are elided from this excerpt.
5234 metadata = dict(zip(db_keys,
5235 db.aux_get(cpv, db_keys)))
5239 if metadata is None:
# Metadata could not be read at all.
5240 mreasons = ["corruption"]
5242 eapi = metadata['EAPI']
# An unsupported EAPI masks the package by itself; no Package object
# is constructed for it in that case.
5245 if not portage.eapi_is_supported(eapi):
5246 mreasons = ['EAPI %s' % eapi]
5248 pkg = Package(type_name=pkg_type, root_config=root_config,
5249 cpv=cpv, built=built, installed=installed, metadata=metadata)
5250 mreasons = get_masking_status(pkg, pkgsettings, root_config)
5251 return metadata, mreasons
5253 def show_masked_packages(masked_packages):
# Print one line per masked package plus, at most once each, the
# package.mask comment that applies and the on-disk path of any
# missing license. Returns True when at least one package was masked
# for an unsupported EAPI. NOTE(review): some lines are elided from
# this excerpt (shown_cpvs initialization, continue statements, and
# a writemsg_stdout call for the license message).
5254 shown_licenses = set()
5255 shown_comments = set()
5256 # Maybe there is both an ebuild and a binary. Only
5257 # show one of them to avoid redundant appearance.
5259 have_eapi_mask = False
5260 for (root_config, pkgsettings, cpv,
5261 metadata, mreasons) in masked_packages:
5262 if cpv in shown_cpvs:
5265 comment, filename = None, None
5266 if "package.mask" in mreasons:
# Look up who masked it and in which file, so the mask comment can be
# echoed below.
5267 comment, filename = \
5268 portage.getmaskingreason(
5269 cpv, metadata=metadata,
5270 settings=pkgsettings,
5271 portdb=root_config.trees["porttree"].dbapi,
5272 return_location=True)
5273 missing_licenses = []
5275 if not portage.eapi_is_supported(metadata["EAPI"]):
5276 have_eapi_mask = True
5278 missing_licenses = \
5279 pkgsettings._getMissingLicenses(
5281 except portage.exception.InvalidDependString:
5282 # This will have already been reported
5283 # above via mreasons.
5286 print("- "+cpv+" (masked by: "+", ".join(mreasons)+")")
5288 if comment and comment not in shown_comments:
5289 writemsg_stdout(filename + ":\n" + comment + "\n",
5291 shown_comments.add(comment)
5292 portdb = root_config.trees["porttree"].dbapi
5293 for l in missing_licenses:
5294 l_path = portdb.findLicensePath(l)
5295 if l in shown_licenses:
# Point the user at a local copy of each missing license, once per
# license name.
5297 msg = ("A copy of the '%s' license" + \
5298 " is located at '%s'.") % (l, l_path)
5301 shown_licenses.add(l)
5302 return have_eapi_mask
def show_mask_docs():
	"""Print a pointer to the documentation on masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	)
	for line in doc_lines:
		print(line)
5308 def filter_iuse_defaults(iuse):
# Generator over IUSE flags with any leading "+"/"-" default marker
# stripped. NOTE(review): the loop header and the yield statements are
# elided from this excerpt — presumably it yields flag[1:] for marked
# flags and flag unchanged otherwise; confirm against upstream.
5310 if flag.startswith("+") or flag.startswith("-"):
5315 def show_blocker_docs_link():
# Print a pointer to the handbook section about blocked packages.
# NOTE(review): the blank-line print() calls between these lines are
# elided from this excerpt.
5317 print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
5318 print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
5320 print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
5323 def get_masking_status(pkg, pkgsettings, root_config):
5325 mreasons = portage.getmaskingstatus(
5326 pkg, settings=pkgsettings,
5327 portdb=root_config.trees["porttree"].dbapi)
5329 if not pkg.installed:
5330 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5331 mreasons.append("CHOST: %s" % \
5332 pkg.metadata["CHOST"])
5334 for msg_type, msgs in pkg.invalid.items():
5336 mreasons.append("invalid: %s" % (msg,))
5338 if not pkg.metadata["SLOT"]:
5339 mreasons.append("invalid: SLOT is undefined")