1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 from __future__ import print_function
12 from itertools import chain
15 from portage import os
16 from portage import digraph
17 from portage.dep import Atom
18 from portage.output import bold, blue, colorize, create_color_func, darkblue, \
19 darkgreen, green, nc_len, red, teal, turquoise, yellow
20 bad = create_color_func("BAD")
21 from portage.sets import SETPREFIX
22 from portage.sets.base import InternalPackageSet
23 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
24 from portage.util import writemsg_level
26 from _emerge.AtomArg import AtomArg
27 from _emerge.Blocker import Blocker
28 from _emerge.BlockerCache import BlockerCache
29 from _emerge.BlockerDepPriority import BlockerDepPriority
30 from _emerge.changelog import calc_changelog
31 from _emerge.countdown import countdown
32 from _emerge.create_world_atom import create_world_atom
33 from _emerge.Dependency import Dependency
34 from _emerge.DependencyArg import DependencyArg
35 from _emerge.DepPriority import DepPriority
36 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
37 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
38 from _emerge.FakeVartree import FakeVartree
39 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
40 from _emerge.format_size import format_size
41 from _emerge.is_valid_package_atom import is_valid_package_atom
42 from _emerge.Package import Package
43 from _emerge.PackageArg import PackageArg
44 from _emerge.PackageCounters import PackageCounters
45 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
46 from _emerge.RepoDisplay import RepoDisplay
47 from _emerge.RootConfig import RootConfig
48 from _emerge.search import search
49 from _emerge.SetArg import SetArg
50 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
51 from _emerge.UnmergeDepPriority import UnmergeDepPriority
52 from _emerge.visible import visible
54 if sys.hexversion >= 0x3000000:
class _frozen_depgraph_config(object):
    """Depgraph state that is computed once and never mutated afterwards,
    so it can be shared across graph recomputations (e.g. backtracking
    restarts).  NOTE(review): several original lines of __init__ are
    elided in this excerpt; ``[elided: ...]`` comments mark the gaps."""

    def __init__(self, settings, trees, myopts, spinner):
        # Global configuration for the target root.
        self.settings = settings
        self.target_root = settings["ROOT"]
        # [elided: myopts handling / edebug flag around here]
        if settings.get("PORTAGE_DEBUG", "") == "1":
        self.spinner = spinner
        # Root config of the root emerge itself is running under ("/").
        self._running_root = trees["/"]["root_config"]
        # With any of these options, no restart of emerge is required
        # after the calculation.
        self._opts_no_restart = frozenset(["--buildpkgonly",
            "--fetchonly", "--fetch-all-uri", "--pretend"])
        # Keep the unmodified trees dict; self.trees below substitutes
        # a FakeVartree for each real vartree.
        self._trees_orig = trees
        # All Package instances
        # [elided: self._pkg_cache / self.trees / self.roots /
        #  self.pkgsettings initialization and "for myroot in trees:" header]
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                # [elided: trees argument of RootConfig]
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            # Model the post-merge vdb state without touching the real vardb.
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache)
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)
        # Sets that a complete graph must always satisfy.
        self._required_set_names = set(["system", "world"])
class _dynamic_depgraph_config(object):
    """Mutable per-calculation depgraph state.  A fresh instance is created
    whenever the graph is (re)computed, e.g. after a backtracking restart.
    NOTE(review): many original lines are elided in this excerpt;
    ``[elided: ...]`` comments mark the visible gaps."""

    def __init__(self, depgraph, myparams, allow_backtracking,
        # [elided: trailing runtime_pkg_mask parameter of this signature]
        self.myparams = myparams.copy()
        self._allow_backtracking = allow_backtracking
        # Maps slot atom to package for each Package added to the graph.
        self._slot_pkg_map = {}
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        # [elided: self.mydbapi initialization]
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # [elided: continuation of this comment]
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        # Contains the args created by select_files.
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # contains all sets added to the graph
        # [elided: self._sets = {} initialization]
        # contains atoms given as arguments
        self._sets["args"] = InternalPackageSet()
        # contains all atoms from all sets added to the graph, including
        # atoms given as arguments
        self._set_atoms = InternalPackageSet()
        self._atom_arg_map = {}
        # contains all nodes pulled in by self._set_atoms
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        self._blocked_pkgs = digraph()
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        self._slot_collision_info = {}
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._parent_atoms = {}
        self._slot_conflict_parent_atoms = set()
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        # [elided: self._dep_stack initialization]
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}
        # Deep-copy the caller's mask so backtracking state owned by the
        # caller is never mutated in place.
        if runtime_pkg_mask is None:
            runtime_pkg_mask = {}
        # [elided: "else:" branch header]
            runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
                runtime_pkg_mask.items())
        self._runtime_pkg_mask = runtime_pkg_mask
        self._need_restart = False

        for myroot in depgraph._frozen_config.trees:
            self._slot_pkg_map[myroot] = {}
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # --nodeps / --buildpkgonly do not need the installed set.
            preload_installed_pkgs = \
                "--nodeps" not in depgraph._frozen_config.myopts and \
                "--buildpkgonly" not in depgraph._frozen_config.myopts
            # This fakedbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageVirtualDbapi(vardb.settings)
            if preload_installed_pkgs:
                # [elided: "for pkg in vardb:" loop header]
                depgraph._spinner_update()
                # This triggers metadata updates via FakeVartree.
                vardb.aux_get(pkg.cpv, [])
                fakedb.cpv_inject(pkg)

            # Now that the vardb state is cached in our FakeVartree,
            # we won't be needing the real vartree cache for awhile.
            # To make some room on the heap, clear the vardbapi
            # cache.  [elided: conditional guarding this call]
            depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._clear_cache()

            self.mydbapi[myroot] = fakedb
            # [elided: graph_tree construction]
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            # [elided: filtered_tree construction]
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["vartree"] = \
                depgraph._frozen_config.trees[myroot]["vartree"]

            # [elided: dbs = [] initialization]
            portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
            bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # (db, pkg_type, built, installed, db_keys)
            if "--usepkgonly" not in depgraph._frozen_config.myopts:
                db_keys = list(portdb._aux_cache_keys)
                dbs.append((portdb, "ebuild", False, False, db_keys))
            if "--usepkg" in depgraph._frozen_config.myopts:
                db_keys = list(bindb._aux_cache_keys)
                dbs.append((bindb, "binary", True, False, db_keys))
            # Use the real (original) vartree's aux-cache keys for the
            # installed-db entry.
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
            if "--usepkg" in depgraph._frozen_config.myopts:
                depgraph._frozen_config._trees_orig[myroot
                    ]["bintree"].populate(
                    "--getbinpkg" in depgraph._frozen_config.myopts,
                    "--getbinpkgonly" in depgraph._frozen_config.myopts)
class depgraph(object):
    """Dependency graph resolver for emerge.  (The class body continues
    beyond this excerpt.)"""

    # Shared mapping of package type -> tree name, reused from RootConfig.
    pkg_tree_map = RootConfig.pkg_tree_map

    # Dependency metadata variables evaluated when traversing a package's deps.
    _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    def __init__(self, settings, trees, myopts, myparams, spinner,
        frozen_config=None, runtime_pkg_mask=None, allow_backtracking=False):
        # Reusing a frozen_config avoids rebuilding the immutable state
        # (e.g. FakeVartree instances) on each backtracking restart.
        if frozen_config is None:
            frozen_config = _frozen_depgraph_config(settings, trees,
            # [elided: remaining constructor arguments of this call]
        self._frozen_config = frozen_config
        # All mutable per-calculation state lives in _dynamic_config.
        self._dynamic_config = _dynamic_depgraph_config(self, myparams,
            allow_backtracking, runtime_pkg_mask)

        # Aliases for the atom/package selection strategies.
        self._select_atoms = self._select_atoms_highest_available
        self._select_package = self._select_pkg_highest_available
271 def _spinner_update(self):
272 if self._frozen_config.spinner:
273 self._frozen_config.spinner.update()
    def _show_missed_update(self):
        """Report updates that were skipped because of backtracking
        (runtime_pkg_mask), grouped per SLOT and per mask type.
        NOTE(review): some original lines are elided in this excerpt."""

        # --quiet without --debug suppresses the report entirely.
        if '--quiet' in self._frozen_config.myopts and \
            '--debug' not in self._frozen_config.myopts:

        # In order to minimize noise, show only the highest
        # missed update from each SLOT.
        # [elided: missed_updates = {} initialization]
        for pkg, mask_reasons in \
            self._dynamic_config._runtime_pkg_mask.items():
            # Exclude installed here since we only
            # want to show available updates.
            k = (pkg.root, pkg.slot_atom)
            if k in missed_updates:
                other_pkg, mask_type, parent_atoms = missed_updates[k]
                # [elided: version comparison against other_pkg]
            for mask_type, parent_atoms in mask_reasons.items():
                missed_updates[k] = (pkg, mask_type, parent_atoms)

        if not missed_updates:

        # Group by mask type so each category is rendered by its
        # specialized helper below.
        missed_update_types = {}
        for pkg, mask_type, parent_atoms in missed_updates.values():
            missed_update_types.setdefault(mask_type,
                []).append((pkg, parent_atoms))

        self._show_missed_update_slot_conflicts(
            missed_update_types.get("slot conflict"))

        self._show_missed_update_unsatisfied_dep(
            missed_update_types.get("missing dependency"))
    def _show_missed_update_unsatisfied_dep(self, missed_updates):
        """Render missed updates caused by unsatisfiable dependencies.
        NOTE(review): several original lines are elided in this excerpt."""

        if not missed_updates:

        write = sys.stderr.write
        # Updates masked by backtracking are collected and shown in
        # abbreviated form at the end.
        backtrack_masked = []

        for pkg, parent_atoms in missed_updates:

            # [elided: "try:" opening this block]
            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent,
                    check_backtrack=True)
            except self._backtrack_mask:
                # This is displayed below in abbreviated form.
                backtrack_masked.append((pkg, parent_atoms))

            write("\n!!! The following update has been skipped " + \
                "due to unsatisfied dependencies:\n\n")

            write(str(pkg.slot_atom))
            # [elided: guard printing the root only for non-default roots]
            write(" for %s" % (pkg.root,))

            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent)

        # [elided: "if backtrack_masked:" guard]
        # These are shown in abbreviated form, in order to avoid terminal
        # flooding from mask messages as reported in bug #285832.
        write("\n!!! The following update(s) have been skipped " + \
            "due to unsatisfied dependencies\n" + \
            "!!! triggered by backtracking:\n\n")
        for pkg, parent_atoms in backtrack_masked:
            write(str(pkg.slot_atom))
            write(" for %s" % (pkg.root,))
    def _show_missed_update_slot_conflicts(self, missed_updates):
        """Render missed updates caused by slot conflicts.
        NOTE(review): several original lines are elided in this excerpt."""

        if not missed_updates:

        # [elided: msg = [] initialization]
        msg.append("\n!!! One or more updates have been skipped due to " + \
            "a dependency conflict:\n\n")

        # [elided: indent definition]
        for pkg, parent_atoms in missed_updates:
            msg.append(str(pkg.slot_atom))
            msg.append(" for %s" % (pkg.root,))

            for parent, atom in parent_atoms:
                msg.append(" conflicts with\n")
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                # [elided: "else:" branch header]
                # Display the specific atom from SetArg or
                # Package types.
                msg.append("%s required by %s" % (atom, parent))

        sys.stderr.write("".join(msg))
    def _show_slot_collision_notice(self):
        """Show an informational message advising the user to mask one of the
        the packages. In some cases it may be possible to resolve this
        automatically, but support for backtracking (removal nodes that have
        already been selected) will be required in order to handle all possible
        cases.  NOTE(review): many original lines are elided in this excerpt.
        """

        if not self._dynamic_config._slot_collision_info:

        self._show_merge_list()

        # [elided: msg = [] initialization]
        msg.append("\n!!! Multiple package instances within a single " + \
            "package slot have been pulled\n")
        msg.append("!!! into the dependency graph, resulting" + \
            " in a slot conflict:\n\n")

        # Max number of parents shown, to avoid flooding the display.
        # [elided: max_parents / indent definitions]
        explanation_columns = 70

        for (slot_atom, root), slot_nodes \
            in self._dynamic_config._slot_collision_info.items():
            msg.append(str(slot_atom))
            msg.append(" for %s" % (root,))

            for node in slot_nodes:
                msg.append(str(node))
                parent_atoms = self._dynamic_config._parent_atoms.get(node)
                # [elided: pruned_list = set() and "if parent_atoms:" guard]
                # Prefer conflict atoms over others.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    if parent_atom in self._dynamic_config._slot_conflict_parent_atoms:
                        pruned_list.add(parent_atom)

                # If this package was pulled in by conflict atoms then
                # show those alone since those are the most interesting.
                # [elided: "if not pruned_list:" guard]
                # When generating the pruned list, prefer instances
                # of DependencyArg over instances of Package.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    parent, atom = parent_atom
                    if isinstance(parent, DependencyArg):
                        pruned_list.add(parent_atom)
                # Prefer Packages instances that themselves have been
                # pulled into collision slots.
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    parent, atom = parent_atom
                    if isinstance(parent, Package) and \
                        (parent.slot_atom, parent.root) \
                        in self._dynamic_config._slot_collision_info:
                        pruned_list.add(parent_atom)
                for parent_atom in parent_atoms:
                    if len(pruned_list) >= max_parents:
                    pruned_list.add(parent_atom)
                omitted_parents = len(parent_atoms) - len(pruned_list)
                parent_atoms = pruned_list
                msg.append(" pulled in by\n")
                for parent_atom in parent_atoms:
                    parent, atom = parent_atom
                    if isinstance(parent,
                        (PackageArg, AtomArg)):
                        # For PackageArg and AtomArg types, it's
                        # redundant to display the atom attribute.
                        msg.append(str(parent))
                    # [elided: "else:" branch header]
                    # Display the specific atom from SetArg or
                    # Package types.
                    msg.append("%s required by %s" % (atom, parent))
                # [elided: omitted-parents / no-parents branching]
                msg.append("(and %d more)\n" % omitted_parents)
                msg.append(" (no parents)\n")

            explanation = self._slot_conflict_explanation(slot_nodes)
            # [elided: "if explanation:" guard and explanations counter]
            msg.append(indent + "Explanation:\n\n")
            for line in textwrap.wrap(explanation, explanation_columns):
                msg.append(2*indent + line + "\n")

        sys.stderr.write("".join(msg))

        # When every conflict got an explanation (or --quiet is active),
        # the generic masking advice below is skipped.
        explanations_for_all = explanations == len(self._dynamic_config._slot_collision_info)

        if explanations_for_all or "--quiet" in self._frozen_config.myopts:

        # [elided: msg list re-initialization]
        msg.append("It may be possible to solve this problem ")
        msg.append("by using package.mask to prevent one of ")
        msg.append("those packages from being selected. ")
        msg.append("However, it is also possible that conflicting ")
        msg.append("dependencies exist such that they are impossible to ")
        msg.append("satisfy simultaneously. If such a conflict exists in ")
        msg.append("the dependencies of two different packages, then those ")
        msg.append("packages can not be installed simultaneously.")

        # NOTE(review): the "formatter" module is deprecated and removed
        # in Python 3.10 — consider textwrap for future-proofing.
        from formatter import AbstractFormatter, DumbWriter
        f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
        # [elided: "for x in msg:" loop header]
        f.add_flowing_data(x)

        # [elided: msg re-initialization / backtracking hint]
        msg.append("For more information, see MASKED PACKAGES ")
        msg.append("section in the emerge man page or refer ")
        msg.append("to the Gentoo Handbook.")
        # [elided: "for x in msg:" loop header]
        f.add_flowing_data(x)
    def _slot_conflict_explanation(self, slot_nodes):
        """
        When a slot conflict occurs due to USE deps, there are a few
        different cases to consider:

        1) New USE are correctly set but --newuse wasn't requested so an
           installed package with incorrect USE happened to get pulled
           into graph before the new one.

        2) New USE are incorrectly set but an installed package has correct
           USE so it got pulled into the graph, and a new instance also got
           pulled in due to --newuse or an upgrade.

        3) Multiple USE deps exist that can't be satisfied simultaneously,
           and multiple package instances got pulled into the same slot to
           satisfy the conflicting deps.

        Currently, explanations and suggested courses of action are generated
        for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

        NOTE(review): several original lines are elided in this excerpt.
        """

        if len(slot_nodes) != 2:
            # Suggestions are only implemented for
            # conflicts between two packages.

        all_conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms
        # [elided: matched_node / matched_atoms initialization]
        unmatched_node = None
        for node in slot_nodes:
            parent_atoms = self._dynamic_config._parent_atoms.get(node)
            # Normally, there are always parent atoms. If there are
            # none then something unexpected is happening and there's
            # currently no suggestion for this case.
            conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
            for parent_atom in conflict_atoms:
                parent, atom = parent_atom
                # Suggestions are currently only implemented for cases
                # in which all conflict atoms have USE deps.
            # [elided: "if conflict_atoms:" branch header]
                if matched_node is not None:
                    # If conflict atoms match multiple nodes
                    # then there's no suggestion.
                # [elided: matched_node = node]
                matched_atoms = conflict_atoms
            # [elided: "else:" branch header]
                if unmatched_node is not None:
                    # Neither node is matched by conflict atoms, and
                    # there is no suggestion for this case.
                unmatched_node = node

        if matched_node is None or unmatched_node is None:
            # This shouldn't happen.

        if unmatched_node.installed and not matched_node.installed and \
            unmatched_node.cpv == matched_node.cpv:
            # If the conflicting packages are the same version then
            # --newuse should be all that's needed. If they are different
            # versions then there's some other problem.
            return "New USE are correctly set, but --newuse wasn't" + \
                " requested, so an installed package with incorrect USE " + \
                "happened to get pulled into the dependency graph. " + \
                "In order to solve " + \
                "this, either specify the --newuse option or explicitly " + \
                " reinstall '%s'." % matched_node.slot_atom

        if matched_node.installed and not unmatched_node.installed:
            atoms = sorted(set(atom for parent, atom in matched_atoms))
            explanation = ("New USE for '%s' are incorrectly set. " + \
                "In order to solve this, adjust USE to satisfy '%s'") % \
                (matched_node.slot_atom, atoms[0])
            # [elided: "if len(atoms) > 1:" branching]
            for atom in atoms[1:-1]:
                explanation += ", '%s'" % (atom,)
            explanation += " and '%s'" % (atoms[-1],)
            # [elided: trailing punctuation append and "return explanation"]
    def _process_slot_conflicts(self):
        """
        Process slot conflict data to identify specific atoms which
        lead to conflict. These atoms only match a subset of the
        packages that have been pulled into a given slot.

        NOTE(review): a few original lines are elided in this excerpt.
        """
        for (slot_atom, root), slot_nodes \
            in self._dynamic_config._slot_collision_info.items():

            # Union of every parent atom of every package in this slot.
            all_parent_atoms = set()
            for pkg in slot_nodes:
                parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
                # [elided: skip when parent_atoms is empty]
                all_parent_atoms.update(parent_atoms)

            for pkg in slot_nodes:
                parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
                if parent_atoms is None:
                    # [elided: parent_atoms = set()]
                    self._dynamic_config._parent_atoms[pkg] = parent_atoms
                for parent_atom in all_parent_atoms:
                    if parent_atom in parent_atoms:
                    # Use package set for matching since it will match via
                    # PROVIDE when necessary, while match_from_list does not.
                    parent, atom = parent_atom
                    atom_set = InternalPackageSet(
                        initial_atoms=(atom,))
                    if atom_set.findAtomForPackage(pkg):
                        parent_atoms.add(parent_atom)
                    # [elided: "else:" — the atom does not match pkg, making
                    #  it a conflict atom]
                        self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
    def _reinstall_for_flags(self, forced_flags,
        orig_use, orig_iuse, cur_use, cur_iuse):
        """Return a set of flags that trigger reinstallation, or None if there
        are no such flags.

        NOTE(review): the return statements are elided in this excerpt."""
        # --newuse / --binpkg-respect-use: any IUSE change (outside of
        # forced flags) or any effective USE change triggers a reinstall.
        if "--newuse" in self._frozen_config.myopts or \
            "--binpkg-respect-use" in self._frozen_config.myopts:
            flags = set(orig_iuse.symmetric_difference(
                cur_iuse).difference(forced_flags))
            flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use)))
            # [elided: "if flags: return flags"]
        # --reinstall=changed-use: only effective USE changes count.
        elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
            flags = orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use))
            # [elided: "if flags: return flags" and the final "return None"]
    def _create_graph(self, allow_unsatisfied=False):
        """Drain the dependency stacks, adding each package/dependency to
        the graph, until both stacks are empty or a failure aborts the walk.
        NOTE(review): guard lines and return statements are elided here."""
        dep_stack = self._dynamic_config._dep_stack
        dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
        while dep_stack or dep_disjunctive_stack:
            self._spinner_update()
            # [elided: "if dep_stack:" guard]
            dep = dep_stack.pop()
            # Package entries expand into their dependencies; other
            # entries are Dependency instances to resolve directly.
            if isinstance(dep, Package):
                if not self._add_pkg_deps(dep,
                    allow_unsatisfied=allow_unsatisfied):
                    # [elided: "return 0" failure path]
            if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
            if dep_disjunctive_stack:
                if not self._pop_disjunction(allow_unsatisfied):
        # [elided: "return 1" success path]
    def _add_dep(self, dep, allow_unsatisfied=False):
        """Resolve a single Dependency: select a matching package (or create
        a Blocker) and add it to the graph, possibly triggering backtracking
        when the dep cannot be satisfied.  NOTE(review): many original lines
        are elided in this excerpt; ``[elided: ...]`` marks the gaps."""
        debug = "--debug" in self._frozen_config.myopts
        buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
        nodeps = "--nodeps" in self._frozen_config.myopts
        empty = "empty" in self._dynamic_config.myparams
        deep = self._dynamic_config.myparams.get("deep", 0)
        # deep is True means unlimited depth.
        recurse = empty or deep is True or dep.depth <= deep
        # [elided: "if dep.blocker:" guard opening the blocker branch]
        if not buildpkgonly and \
            dep.parent not in self._dynamic_config._slot_collision_nodes:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
                # [elided: "return 1"]
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.metadata["EAPI"],
                root=dep.parent.root)
            self._dynamic_config._blocker_parents.add(blocker, dep.parent)
        # [elided: "return 1"]

        if dep.child is None:
            dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
                onlydeps=dep.onlydeps)
        # [elided: "else:" branch header]
        # The caller has selected a specific package
        # via self._minimize_packages().
        # [elided: dep_pkg = dep.child]
        existing_node = self._dynamic_config._slot_pkg_map[
            dep.root].get(dep_pkg.slot_atom)
        if existing_node is not dep_pkg:
            # [elided: existing_node = None]

        # [elided: "if not dep_pkg:" unsatisfied-dep handling]
        if dep.priority.optional:
            # This could be an unnecessary build-time dep
            # pulled in by --with-bdeps=y.
            # [elided: "return 1"]
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)
            # [elided: "return 1"]
        self._dynamic_config._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))

        # The parent node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking:
            if dep.parent in self._dynamic_config._runtime_pkg_mask:
                if "--debug" in self._frozen_config.myopts:
                    # [elided: writemsg(... call opening]
                    "!!! backtracking loop detected: %s %s\n" % \
                    self._dynamic_config._runtime_pkg_mask[
                        dep.parent]), noiselevel=-1)
            # [elided: "else:" branch header]
            # Do not backtrack if only USE have to be changed in
            # order to satisfy the dependency.
            dep_pkg, existing_node = \
                self._select_package(dep.root, dep.atom.without_use,
                    onlydeps=dep.onlydeps)
            # [elided: "if dep_pkg is None:" guard]
            self._dynamic_config._runtime_pkg_mask.setdefault(
                dep.parent, {})["missing dependency"] = \
                set([(dep.parent, dep.root, dep.atom)])
            self._dynamic_config._need_restart = True
            if "--debug" in self._frozen_config.myopts:
                # [elided: msg = [] initialization]
                msg.append("backtracking due to unsatisfied dep:")
                msg.append(" parent: %s" % dep.parent)
                msg.append(" priority: %s" % dep.priority)
                msg.append(" root: %s" % dep.root)
                msg.append(" atom: %s" % dep.atom)
                writemsg_level("".join("%s\n" % l for l in msg),
                    noiselevel=-1, level=logging.DEBUG)
        # [elided: "return 0"]

        # In some cases, dep_check will return deps that shouldn't
        # be processed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if dep.priority.satisfied and \
            not dep_pkg.installed and \
            not (existing_node or recurse):
            # [elided: myarg = None]
            if dep.root == self._frozen_config.target_root:
                # [elided: "try:"]
                myarg = next(self._iter_atoms_for_pkg(dep_pkg))
                except StopIteration:
                    # [elided: pass]
                except portage.exception.InvalidDependString:
                    if not dep_pkg.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
                        # [elided: "raise"]
            # [elided: "if not myarg:" guard]
            self._dynamic_config._ignored_deps.append(dep)
            # [elided: "return 1"]

        if not self._add_pkg(dep_pkg, dep):
            # [elided: "return 0" / final "return 1"]
    def _add_pkg(self, pkg, dep):
        """Add *pkg* (selected to satisfy *dep*) to the dependency graph,
        handling slot conflicts, virtual registration and scheduling its
        own deps for traversal.  NOTE(review): many original lines are
        elided in this excerpt; ``[elided: ...]`` marks the gaps."""
        # [elided: debug flag / defaults for the dep is None case]
        myparent = dep.parent
        priority = dep.priority
        # [elided: guard creating a default priority when unset]
        priority = DepPriority()
        """
        Fills the digraph with nodes comprised of packages to merge.
        mybigkey is the package spec of the package to merge.
        myparent is the package depending on mybigkey ( or None )
        addme = Should we add this package to the digraph or are we just looking at it's deps?
        Think --onlydeps, we need to ignore packages in that case.
        """
        #IUSE-aware emerge -> USE DEP aware depgraph
        #"no downgrade" emerge
        # Ensure that the dependencies of the same package
        # are never processed more than once.
        previously_added = pkg in self._dynamic_config.digraph

        # select the correct /var database that we'll be checking against
        vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]

        # [elided: "try:" guarding the arg-atom lookup]
        arg_atoms = list(self._iter_atoms_for_pkg(pkg))
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
                # [elided: "return 0"]

        if not pkg.installed and \
            "empty" not in self._dynamic_config.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        # [elided: "if existing_node:" guard]
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
            pkg != existing_node and \
            dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            # [elided: "if arg_atoms:" guard]
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(existing_node, parent,
                # [elided: priority keyword continuation of this call]
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            # way.
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self._dynamic_config.digraph.addnode(existing_node, myparent,
                # [elided: priority keyword continuation of this call]
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
            # [elided: "return 1"]

        # A slot conflict has occurred.
        # The existing node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking and \
            # [elided: "existing_node in" membership test]
            self._dynamic_config._runtime_pkg_mask:
            if "--debug" in self._frozen_config.myopts:
                # [elided: writemsg(... call opening]
                "!!! backtracking loop detected: %s %s\n" % \
                self._dynamic_config._runtime_pkg_mask[
                    existing_node]), noiselevel=-1)
        elif self._dynamic_config._allow_backtracking and \
            not self._accept_blocker_conflicts():
            self._add_slot_conflict(pkg)
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))
            # [elided: "if arg_atoms:" guard]
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)
            self._process_slot_conflicts()

            # [elided: parent_atoms assignment opening the backtrack setup]
            self._dynamic_config._parent_atoms.get(pkg, set())
            parent_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
            if pkg >= existing_node:
                # We only care about the parent atoms
                # when they trigger a downgrade.
                # [elided: parent_atoms = set()]

            self._dynamic_config._runtime_pkg_mask.setdefault(
                existing_node, {})["slot conflict"] = parent_atoms
            self._dynamic_config._need_restart = True
            if "--debug" in self._frozen_config.myopts:
                # [elided: msg = [] initialization]
                msg.append("backtracking due to slot conflict:")
                msg.append(" package: %s" % existing_node)
                msg.append(" slot: %s" % existing_node.slot_atom)
                msg.append(" parents: %s" % \
                    [(str(parent), atom) \
                    for parent, atom in parent_atoms])
                writemsg_level("".join("%s\n" % l for l in msg),
                    noiselevel=-1, level=logging.DEBUG)
            # [elided: "return 0"]

        # A slot collision has occurred. Sometimes this coincides
        # with unresolvable blockers, so the slot collision will be
        # shown later if there are no unresolvable blockers.
        self._add_slot_conflict(pkg)
        slot_collision = True

        # [elided: "if slot_collision:" branching]
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self._dynamic_config.mydbapi since that data will be used for blocker
        # validation.
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        # [elided: "pass"]
        elif not previously_added:
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            # over this one.
            # [elided: "try:"]
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            # [elided: settings unlock]
            settings.setinst(pkg.cpv, pkg.metadata)
            # [elided: settings lock]
            except portage.exception.InvalidDependString as e:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
                # [elided: "return 0"]

        # [elided: "if arg_atoms:" guard]
        self._dynamic_config._set_nodes.add(pkg)

        # Do this even when addme is False (--onlydeps) so that the
        # parent/child relationship is always known in case
        # self._show_slot_collision_notice() needs to be called later.
        self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))

        # [elided: "if arg_atoms:" guard]
        for parent_atom in arg_atoms:
            parent, atom = parent_atom
            self._dynamic_config.digraph.add(pkg, parent, priority=priority)
            self._add_parent_atom(pkg, parent_atom)

        """ This section determines whether we go deeper into dependencies or not.
        We want to go deeper on a few occasions:
        Installing package A, we need to make sure package A's deps are met.
        emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
        If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
        """
        # [elided: depth computation]
        deep = self._dynamic_config.myparams.get("deep", 0)
        empty = "empty" in self._dynamic_config.myparams
        recurse = empty or deep is True or depth + 1 <= deep
        dep_stack = self._dynamic_config._dep_stack
        if "recurse" not in self._dynamic_config.myparams:
            # [elided: "return 1"]
        elif pkg.installed and not recurse:
            # Deps of installed packages outside the recursion depth are
            # recorded but not traversed.
            dep_stack = self._dynamic_config._ignored_deps

        self._spinner_update()

        if not previously_added:
            dep_stack.append(pkg)
        # [elided: final "return 1"]
1019 def _add_parent_atom(self, pkg, parent_atom):
1020 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1021 if parent_atoms is None:
1022 parent_atoms = set()
1023 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1024 parent_atoms.add(parent_atom)
# Record pkg as a participant in a slot conflict: add it to
# _slot_collision_nodes and group it, keyed by (slot_atom, root), with the
# package currently occupying that slot in _slot_pkg_map.
# NOTE(review): original line 1031 is elided from this dump — presumably it
# initializes slot_nodes to an empty set when the key is new; confirm
# against the full file.
1026 def _add_slot_conflict(self, pkg):
1027 self._dynamic_config._slot_collision_nodes.add(pkg)
1028 slot_key = (pkg.slot_atom, pkg.root)
1029 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1030 if slot_nodes is None:
1032 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1033 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
# Expand pkg's dependency strings (DEPEND/RDEPEND/PDEPEND) into per-root,
# per-priority dep strings and feed each through _add_pkg_dep_string().
# Disjunctive parts are queued separately via _queue_disjunctive_deps().
# NOTE(review): this dump elides many original lines (1037, 1039-1040,
# 1043, 1046-1047, 1049, 1051, 1059, 1068, ...), so the control flow shown
# here is incomplete — confirm against the full file before editing.
1036 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1038 mytype = pkg.type_name
1041 metadata = pkg.metadata
1042 myuse = pkg.use.enabled
1044 depth = pkg.depth + 1
1045 removal_action = "remove" in self._dynamic_config.myparams
1048 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1050 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: only build-time deps matter, so the
# runtime (RDEPEND) and post-merge (PDEPEND) strings are blanked out.
1052 if not pkg.built and \
1053 "--buildpkgonly" in self._frozen_config.myopts and \
1054 "deep" not in self._dynamic_config.myparams and \
1055 "empty" not in self._dynamic_config.myparams:
1056 edepend["RDEPEND"] = ""
1057 edepend["PDEPEND"] = ""
1058 bdeps_optional = False
1060 if pkg.built and not removal_action:
1061 if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
1062 # Pull in build time deps as requested, but mark them as
1063 # "optional" since they are not strictly required. This allows
1064 # more freedom in the merge order calculation for solving
1065 # circular dependencies. Don't convert to PDEPEND since that
1066 # could make --with-bdeps=y less effective if it is used to
1067 # adjust merge order to prevent built_with_use() calls from
1069 bdeps_optional = True
1071 # Built packages do not have build time dependencies.
1072 edepend["DEPEND"] = ""
1074 if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
1075 edepend["DEPEND"] = ""
# --root-deps controls which root build-time deps are resolved against.
1081 root_deps = self._frozen_config.myopts.get("--root-deps")
1082 if root_deps is not None:
1083 if root_deps is True:
1085 elif root_deps == "rdeps":
1086 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples; build-time deps get a
# buildtime/optional priority, runtime and post deps get runtime priorities.
1089 (bdeps_root, edepend["DEPEND"],
1090 self._priority(buildtime=(not bdeps_optional),
1091 optional=bdeps_optional)),
1092 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
1093 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
1096 debug = "--debug" in self._frozen_config.myopts
1097 strict = mytype != "installed"
# Temporarily relax global dep-check strictness while processing;
# restored to True at the end (line 1158).
1100 portage.dep._dep_check_strict = False
1102 for dep_root, dep_string, dep_priority in deps:
1107 print("Parent: ", jbigkey)
1108 print("Depstring:", dep_string)
1109 print("Priority:", dep_priority)
# Normalize the raw dep string (paren_reduce -> use_reduce ->
# paren_normalize) before queuing disjunctive parts.
1113 dep_string = portage.dep.paren_normalize(
1114 portage.dep.use_reduce(
1115 portage.dep.paren_reduce(dep_string),
1116 uselist=pkg.use.enabled))
1118 dep_string = list(self._queue_disjunctive_deps(
1119 pkg, dep_root, dep_priority, dep_string))
1121 except portage.exception.InvalidDependString as e:
1125 show_invalid_depstring_notice(pkg, dep_string, str(e))
1131 dep_string = portage.dep.paren_enclose(dep_string)
1133 if not self._add_pkg_dep_string(
1134 pkg, dep_root, dep_priority, dep_string,
# An atom without a category matched multiple packages; report the
# matches and explain why the package cannot be installed.
1138 except portage.exception.AmbiguousPackageName as e:
1140 portage.writemsg("\n\n!!! An atom in the dependencies " + \
1141 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1143 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
1144 portage.writemsg("\n", noiselevel=-1)
1145 if mytype == "binary":
1147 "!!! This binary package cannot be installed: '%s'\n" % \
1148 mykey, noiselevel=-1)
1149 elif mytype == "ebuild":
1150 portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
1151 myebuild, mylocation = portdb.findname2(mykey)
1152 portage.writemsg("!!! This ebuild cannot be installed: " + \
1153 "'%s'\n" % myebuild, noiselevel=-1)
1154 portage.writemsg("!!! Please notify the package maintainer " + \
1155 "that atoms must be fully-qualified.\n", noiselevel=-1)
1158 portage.dep._dep_check_strict = True
# Resolve one normalized dep string for pkg into concrete atoms (via
# _select_atoms), then add a Dependency edge for each selected atom and for
# each indirect virtual's own deps.
# NOTE(review): original lines 1162, 1166-1168, 1172-1173, 1179-1184, etc.
# are elided from this dump — control flow shown here is incomplete.
1161 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1163 depth = pkg.depth + 1
1164 debug = "--debug" in self._frozen_config.myopts
1165 strict = pkg.type_name != "installed"
1169 print("Parent: ", pkg)
1170 print("Depstring:", dep_string)
1171 print("Priority:", dep_priority)
1174 selected_atoms = self._select_atoms(dep_root,
1175 dep_string, myuse=pkg.use.enabled, parent=pkg,
1176 strict=strict, priority=dep_priority)
1177 except portage.exception.InvalidDependString as e:
1178 show_invalid_depstring_notice(pkg, dep_string, str(e))
1185 print("Candidates:", selected_atoms)
1187 root_config = self._frozen_config.roots[dep_root]
1188 vardb = root_config.trees["vartree"].dbapi
# Direct deps of pkg itself: mark a dep "satisfied" when an installed
# package already matches the (non-blocker) atom.
1190 for atom, child in self._minimize_children(
1191 pkg, dep_priority, root_config, selected_atoms[pkg]):
1193 mypriority = dep_priority.copy()
1194 if not atom.blocker and vardb.match(atom):
1195 mypriority.satisfied = True
1197 if not self._add_dep(Dependency(atom=atom,
1198 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1199 priority=mypriority, root=dep_root),
1200 allow_unsatisfied=allow_unsatisfied):
1203 selected_atoms.pop(pkg)
1205 # Add selected indirect virtual deps to the graph. This
1206 # takes advantage of circular dependency avoidance that's done
1207 # by dep_zapdeps. We preserve actual parent/child relationships
1208 # here in order to avoid distorting the dependency graph like
1209 # <=portage-2.1.6.x did.
1210 for virt_pkg, atoms in selected_atoms.items():
1212 # Just assume depth + 1 here for now, though it's not entirely
1213 # accurate since multiple levels of indirect virtual deps may
1214 # have been traversed. The _add_pkg call will reset the depth to
1215 # 0 if this package happens to match an argument.
1216 if not self._add_pkg(virt_pkg,
1217 Dependency(atom=Atom('=' + virt_pkg.cpv),
1218 depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
1222 for atom, child in self._minimize_children(
1223 pkg, self._priority(runtime=True), root_config, atoms):
1224 # This is a GLEP 37 virtual, so its deps are all runtime.
1225 mypriority = self._priority(runtime=True)
1226 if not atom.blocker and vardb.match(atom):
1227 mypriority.satisfied = True
1229 if not self._add_dep(Dependency(atom=atom,
1230 blocker=atom.blocker, child=child, depth=virt_pkg.depth,
1231 parent=virt_pkg, priority=mypriority, root=dep_root),
1232 allow_unsatisfied=allow_unsatisfied):
1236 print("Exiting...", pkg)
# Map each atom to a selected package, then use a bipartite digraph
# (packages <-> atoms) to drop packages whose every parent atom is also
# satisfiable by some other selected package. Yields (atom, package) pairs.
# NOTE(review): lines 1241, 1245-1253, 1256-1258, 1260, etc. are elided
# from this dump; the docstring delimiters and several branches are missing.
1240 def _minimize_children(self, parent, priority, root_config, atoms):
1242 Selects packages to satisfy the given atoms, and minimizes the
1243 number of selected packages. This serves to identify and eliminate
1244 redundant package selections when multiple atoms happen to specify
1254 dep_pkg, existing_node = self._select_package(
1255 root_config.root, atom)
1259 atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
1261 if len(atom_pkg_map) < 2:
1262 for item in atom_pkg_map.items():
1268 for atom, pkg in atom_pkg_map.items():
1269 pkg_atom_map.setdefault(pkg, set()).add(atom)
1270 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
1272 for cp, pkgs in cp_pkg_map.items():
1275 for atom in pkg_atom_map[pkg]:
1279 # Use a digraph to identify and eliminate any
1280 # redundant package selections.
1281 atom_pkg_graph = digraph()
1284 for atom in pkg_atom_map[pkg1]:
1286 atom_pkg_graph.add(pkg1, atom)
1287 atom_set = InternalPackageSet(initial_atoms=(atom,))
1291 if atom_set.findAtomForPackage(pkg2):
1292 atom_pkg_graph.add(pkg2, atom)
# A package is redundant only if every atom pointing at it has at
# least one other candidate package.
1295 eliminate_pkg = True
1296 for atom in atom_pkg_graph.parent_nodes(pkg):
1297 if len(atom_pkg_graph.child_nodes(atom)) < 2:
1298 eliminate_pkg = False
1301 atom_pkg_graph.remove(pkg)
1303 for atom in cp_atoms:
1304 child_pkgs = atom_pkg_graph.child_nodes(atom)
1305 yield (atom, child_pkgs[0])
# Walk a reduced dep structure: defer disjunctive parts (|| groups and
# old-style virtual/ atoms) via _queue_disjunction, recurse into nested
# lists, and (per the docstring) yield the non-disjunctive remainder.
# NOTE(review): lines 1308, 1311-1313, 1315, 1319-1320, 1323-1325, 1331,
# 1334, 1338-1341 are elided — the yield statements and loop-index updates
# are not visible in this dump.
1307 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
1309 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
1310 Yields non-disjunctive deps. Raises InvalidDependString when
1314 while i < len(dep_struct):
1316 if isinstance(x, list):
1317 for y in self._queue_disjunctive_deps(
1318 pkg, dep_root, dep_priority, x):
# A "||" operator consumes the following element as its choice list.
1321 self._queue_disjunction(pkg, dep_root, dep_priority,
1322 [ x, dep_struct[ i + 1 ] ] )
1326 x = portage.dep.Atom(x)
1327 except portage.exception.InvalidAtom:
# Installed packages are allowed to carry invalid atoms (historical
# data); for everything else this is a hard error.
1328 if not pkg.installed:
1329 raise portage.exception.InvalidDependString(
1330 "invalid atom: '%s'" % x)
1332 # Note: Eventually this will check for PROPERTIES=virtual
1333 # or whatever other metadata gets implemented for this
1335 if x.cp.startswith('virtual/'):
1336 self._queue_disjunction( pkg, dep_root,
1337 dep_priority, [ str(x) ] )
1342 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1343 self._dynamic_config._dep_disjunctive_stack.append(
1344 (pkg, dep_root, dep_priority, dep_struct))
# Pop one deferred disjunctive dep and re-process it through
# _add_pkg_dep_string, which repopulates the regular dep stack.
# NOTE(review): lines 1347, 1350, and 1356-1358 are elided — the docstring
# delimiters and the return statements are not visible in this dump.
1346 def _pop_disjunction(self, allow_unsatisfied):
1348 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
1349 populate self._dynamic_config._dep_stack.
1351 pkg, dep_root, dep_priority, dep_struct = \
1352 self._dynamic_config._dep_disjunctive_stack.pop()
1353 dep_string = portage.dep.paren_enclose(dep_struct)
1354 if not self._add_pkg_dep_string(
1355 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
# Construct a dep-priority object: UnmergeDepPriority in removal mode,
# DepPriority otherwise, forwarding all keyword args.
# NOTE(review): line 1362 ("else:", presumably) is elided from this dump.
1359 def _priority(self, **kwargs):
1360 if "remove" in self._dynamic_config.myparams:
1361 priority_constructor = UnmergeDepPriority
1363 priority_constructor = DepPriority
1364 return priority_constructor(**kwargs)
# Expand a category-less atom to full atoms: scan the categories of each
# filtered db for a matching package name and build one Atom per category
# found. Returns the list described in the @returns tag below.
# NOTE(review): lines 1367, 1372, 1374, 1378, 1380, 1384-1386 and 1390-1391
# are elided (docstring delimiters, list initializations, return).
1366 def _dep_expand(self, root_config, atom_without_category):
1368 @param root_config: a root config instance
1369 @type root_config: RootConfig
1370 @param atom_without_category: an atom without a category component
1371 @type atom_without_category: String
1373 @returns: a list of atoms containing categories (possibly empty)
# Temporarily insert a "null" category just to split out the package name.
1375 null_cp = portage.dep_getkey(insert_category_into_atom(
1376 atom_without_category, "null"))
1377 cat, atom_pn = portage.catsplit(null_cp)
1379 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1381 for db, pkg_type, built, installed, db_keys in dbs:
1382 for cat in db.categories:
1383 if db.cp_list("%s/%s" % (cat, atom_pn)):
1387 for cat in categories:
1388 deps.append(Atom(insert_category_into_atom(
1389 atom_without_category, cat)))
# Return whether any filtered db for this root provides atom_cp as a real
# (new-style virtual) package.
# NOTE(review): lines 1393 and 1397-1399 are elided — the result variable
# initialization and return are not visible in this dump.
1392 def _have_new_virt(self, root, atom_cp):
1394 for db, pkg_type, built, installed, db_keys in \
1395 self._dynamic_config._filtered_trees[root]["dbs"]:
1396 if db.cp_list(atom_cp):
# Iterate the argument atoms (from _set_atoms / _atom_arg_map) that match
# pkg, skipping old-style virtual atoms shadowed by a new-style virtual and
# atoms better satisfied by a higher version in another slot.
# NOTE(review): many lines (1404, 1410-1411, 1414, 1417, 1421, 1424, 1426,
# 1429-1432) are elided — continue/yield statements are not visible here.
1401 def _iter_atoms_for_pkg(self, pkg):
1402 # TODO: add multiple $ROOT support
1403 if pkg.root != self._frozen_config.target_root:
1405 atom_arg_map = self._dynamic_config._atom_arg_map
1406 root_config = self._frozen_config.roots[pkg.root]
1407 for atom in self._dynamic_config._set_atoms.iterAtomsForPackage(pkg):
1408 if atom.cp != pkg.cp and \
1409 self._have_new_virt(pkg.root, atom.cp):
1412 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
1413 visible_pkgs.reverse() # descending order
1415 for visible_pkg in visible_pkgs:
1416 if visible_pkg.cp != atom.cp:
1418 if pkg >= visible_pkg:
1419 # This is descending order, and we're not
1420 # interested in any versions <= pkg given.
1422 if pkg.slot_atom != visible_pkg.slot_atom:
1423 higher_slot = visible_pkg
1425 if higher_slot is not None:
1427 for arg in atom_arg_map[(atom, pkg.root)]:
1428 if isinstance(arg, PackageArg) and \
# Translate the command-line file/atom arguments into DependencyArg
# objects (PackageArg/SetArg/AtomArg), apply greedy-slot expansion for
# --update, record favorites, and hand off to self._resolve().
# NOTE(review): this dump elides a large number of original lines
# throughout this method (loop headers, else branches, returns); the
# visible code is a partial view — confirm against the full file.
1433 def select_files(self, myfiles):
1434 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1435 self._dynamic_config._initial_arg_list and call self._resolve to create the
1436 appropriate depgraph and return a favorite list."""
1437 debug = "--debug" in self._frozen_config.myopts
1438 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1439 sets = root_config.sets
1440 getSetAtoms = root_config.setconfig.getSetAtoms
1442 myroot = self._frozen_config.target_root
1443 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1444 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1445 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1446 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1447 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1448 pkgsettings = self._frozen_config.pkgsettings[myroot]
1450 onlydeps = "--onlydeps" in self._frozen_config.myopts
# Binary package (.tbz2) argument: locate it under PKGDIR and verify the
# path matches where the bintree expects that CPV to live.
1453 ext = os.path.splitext(x)[1]
1455 if not os.path.exists(x):
1457 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1458 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1459 elif os.path.exists(
1460 os.path.join(pkgsettings["PKGDIR"], x)):
1461 x = os.path.join(pkgsettings["PKGDIR"], x)
1463 print("\n\n!!! Binary package '"+str(x)+"' does not exist.")
1464 print("!!! Please ensure the tbz2 exists as specified.\n")
1465 return 0, myfavorites
1466 mytbz2=portage.xpak.tbz2(x)
1467 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1468 if os.path.realpath(x) != \
1469 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1470 print(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n"))
1471 return 0, myfavorites
1473 pkg = self._pkg(mykey, "binary", root_config,
1475 args.append(PackageArg(arg=x, package=pkg,
1476 root_config=root_config))
# Raw .ebuild path argument: derive cat/pn from the directory layout and
# verify the ebuild really lives in the configured tree.
1477 elif ext==".ebuild":
1478 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1479 pkgdir = os.path.dirname(ebuild_path)
1480 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1481 cp = pkgdir[len(tree_root)+1:]
1482 e = portage.exception.PackageNotFound(
1483 ("%s is not in a valid portage tree " + \
1484 "hierarchy or does not exist") % x)
1485 if not portage.isvalidatom(cp):
1487 cat = portage.catsplit(cp)[0]
# [:-7] strips the ".ebuild" suffix from the file name.
1488 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1489 if not portage.isvalidatom("="+mykey):
1491 ebuild_path = portdb.findname(mykey)
1493 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1494 cp, os.path.basename(ebuild_path)):
1495 print(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n"))
1496 return 0, myfavorites
1497 if mykey not in portdb.xmatch(
1498 "match-visible", portage.dep_getkey(mykey)):
1499 print(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use"))
1500 print(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man"))
1501 print(colorize("BAD", "*** page for details."))
1502 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1505 raise portage.exception.PackageNotFound(
1506 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1507 pkg = self._pkg(mykey, "ebuild", root_config,
1509 args.append(PackageArg(arg=x, package=pkg,
1510 root_config=root_config))
# Absolute-path argument: defer to a batched iter_owners() lookup below.
1511 elif x.startswith(os.path.sep):
1512 if not x.startswith(myroot):
1513 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1514 " $ROOT.\n") % x, noiselevel=-1)
1516 # Queue these up since it's most efficient to handle
1517 # multiple files in a single iter_owners() call.
1518 lookup_owners.append(x)
1520 if x in ("system", "world"):
# @set argument: expand recursively so nested-set containment works.
1522 if x.startswith(SETPREFIX):
1523 s = x[len(SETPREFIX):]
1525 raise portage.exception.PackageSetNotFound(s)
1526 if s in self._dynamic_config._sets:
1528 # Recursively expand sets so that containment tests in
1529 # self._get_parent_sets() properly match atoms in nested
1530 # sets (like if world contains system).
1531 expanded_set = InternalPackageSet(
1532 initial_atoms=getSetAtoms(s))
1533 self._dynamic_config._sets[s] = expanded_set
1534 args.append(SetArg(arg=x, set=expanded_set,
1535 root_config=root_config))
1537 if not is_valid_package_atom(x):
1538 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1540 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1541 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1543 # Don't expand categories or old-style virtuals here unless
1544 # necessary. Expansion of old-style virtuals here causes at
1545 # least the following problems:
1546 # 1) It's more difficult to determine which set(s) an atom
1547 # came from, if any.
1548 # 2) It takes away freedom from the resolver to choose other
1549 # possible expansions when necessary.
1551 args.append(AtomArg(arg=x, atom=Atom(x),
1552 root_config=root_config))
# Category-less atom: expand, then disambiguate by preferring the
# installed cp and preferring a non-virtual cp over virtuals.
1554 expanded_atoms = self._dep_expand(root_config, x)
1555 installed_cp_set = set()
1556 for atom in expanded_atoms:
1557 if vardb.cp_list(atom.cp):
1558 installed_cp_set.add(atom.cp)
1560 if len(installed_cp_set) > 1:
1561 non_virtual_cps = set()
1562 for atom_cp in installed_cp_set:
1563 if not atom_cp.startswith("virtual/"):
1564 non_virtual_cps.add(atom_cp)
1565 if len(non_virtual_cps) == 1:
1566 installed_cp_set = non_virtual_cps
1568 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
1569 installed_cp = next(iter(installed_cp_set))
1570 expanded_atoms = [atom for atom in expanded_atoms \
1571 if atom.cp == installed_cp]
1573 # If a non-virtual package and one or more virtual packages
1574 # are in expanded_atoms, use the non-virtual package.
1575 if len(expanded_atoms) > 1:
1576 number_of_virtuals = 0
1577 for expanded_atom in expanded_atoms:
1578 if expanded_atom.cp.startswith("virtual/"):
1579 number_of_virtuals += 1
1581 candidate = expanded_atom
1582 if len(expanded_atoms) - number_of_virtuals == 1:
1583 expanded_atoms = [ candidate ]
1585 if len(expanded_atoms) > 1:
1588 ambiguous_package_name(x, expanded_atoms, root_config,
1589 self._frozen_config.spinner, self._frozen_config.myopts)
1590 return False, myfavorites
1592 atom = expanded_atoms[0]
1594 null_atom = Atom(insert_category_into_atom(x, "null"))
1595 cat, atom_pn = portage.catsplit(null_atom.cp)
1596 virts_p = root_config.settings.get_virts_p().get(atom_pn)
1598 # Allow the depgraph to choose which virtual.
1599 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
1603 args.append(AtomArg(arg=x, atom=atom,
1604 root_config=root_config))
# Resolve queued absolute paths to owning packages in one pass.
1608 search_for_multiple = False
1609 if len(lookup_owners) > 1:
1610 search_for_multiple = True
1612 for x in lookup_owners:
1613 if not search_for_multiple and os.path.isdir(x):
1614 search_for_multiple = True
1615 relative_paths.append(x[len(myroot)-1:])
1618 for pkg, relative_path in \
1619 real_vardb._owners.iter_owners(relative_paths):
1620 owners.add(pkg.mycpv)
1621 if not search_for_multiple:
1625 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1626 "by any package.\n") % lookup_owners[0], noiselevel=-1)
1630 slot = vardb.aux_get(cpv, ["SLOT"])[0]
1632 # portage now masks packages with missing slot, but it's
1633 # possible that one was installed by an older version
1634 atom = Atom(portage.cpv_getkey(cpv))
1636 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
1637 args.append(AtomArg(arg=atom, atom=atom,
1638 root_config=root_config))
1640 if "--update" in self._frozen_config.myopts:
1641 # In some cases, the greedy slots behavior can pull in a slot that
1642 # the user would want to uninstall due to it being blocked by a
1643 # newer version in a different slot. Therefore, it's necessary to
1644 # detect and discard any that should be uninstalled. Each time
1645 # that arguments are updated, package selections are repeated in
1646 # order to ensure consistency with the current arguments:
1648 # 1) Initialize args
1649 # 2) Select packages and generate initial greedy atoms
1650 # 3) Update args with greedy atoms
1651 # 4) Select packages and generate greedy atoms again, while
1652 # accounting for any blockers between selected packages
1653 # 5) Update args with revised greedy atoms
1655 self._set_args(args)
1658 greedy_args.append(arg)
1659 if not isinstance(arg, AtomArg):
1661 for atom in self._greedy_slots(arg.root_config, arg.atom):
1663 AtomArg(arg=arg.arg, atom=atom,
1664 root_config=arg.root_config))
1666 self._set_args(greedy_args)
1669 # Revise greedy atoms, accounting for any blockers
1670 # between selected packages.
1671 revised_greedy_args = []
1673 revised_greedy_args.append(arg)
1674 if not isinstance(arg, AtomArg):
1676 for atom in self._greedy_slots(arg.root_config, arg.atom,
1677 blocker_lookahead=True):
1678 revised_greedy_args.append(
1679 AtomArg(arg=arg.arg, atom=atom,
1680 root_config=arg.root_config))
1681 args = revised_greedy_args
1682 del revised_greedy_args
1684 self._set_args(args)
# Deduplicate favorites (set) but return them as a list.
1686 myfavorites = set(myfavorites)
1688 if isinstance(arg, (AtomArg, PackageArg)):
1689 myfavorites.add(arg.atom)
1690 elif isinstance(arg, SetArg):
1691 myfavorites.add(arg.arg)
1692 myfavorites = list(myfavorites)
1695 portage.writemsg("\n", noiselevel=-1)
1696 # Order needs to be preserved since a feature of --nodeps
1697 # is to allow the user to force a specific merge order.
1698 self._dynamic_config._initial_arg_list = args[:]
1700 return self._resolve(myfavorites)
# Seed the dependency graph from _initial_arg_list: for each argument atom,
# honor package.provided, select a package, add it to the graph, then run
# _create_graph() to process transitive deps. Returns (success, favorites).
# NOTE(review): this dump elides many original lines (try/else/continue
# branches, returns); the visible flow is a partial view of the method.
1702 def _resolve(self, myfavorites):
1703 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1704 call self._creategraph to process theier deps and return
1706 debug = "--debug" in self._frozen_config.myopts
1707 onlydeps = "--onlydeps" in self._frozen_config.myopts
1708 myroot = self._frozen_config.target_root
1709 pkgsettings = self._frozen_config.pkgsettings[myroot]
1710 pprovideddict = pkgsettings.pprovideddict
1711 virtuals = pkgsettings.getvirtuals()
1712 for arg in self._dynamic_config._initial_arg_list:
1713 for atom in arg.set:
1714 self._spinner_update()
1715 dep = Dependency(atom=atom, onlydeps=onlydeps,
1716 root=myroot, parent=arg)
# package.provided entries satisfy the atom without merging anything.
1718 pprovided = pprovideddict.get(atom.cp)
1719 if pprovided and portage.match_from_list(atom, pprovided):
1720 # A provided package has been specified on the command line.
1721 self._dynamic_config._pprovided_args.append((arg, atom))
1723 if isinstance(arg, PackageArg):
1724 if not self._add_pkg(arg.package, dep) or \
1725 not self._create_graph():
1726 if not self._dynamic_config._need_restart:
1727 sys.stderr.write(("\n\n!!! Problem " + \
1728 "resolving dependencies for %s\n") % \
1730 return 0, myfavorites
1733 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1734 (arg, atom), noiselevel=-1)
1735 pkg, existing_node = self._select_package(
1736 myroot, atom, onlydeps=onlydeps)
# No package selected: check whether an old-style virtual expansion
# of the atom is satisfied by package.provided before failing.
1738 pprovided_match = False
1739 for virt_choice in virtuals.get(atom.cp, []):
1740 expanded_atom = portage.dep.Atom(
1741 atom.replace(atom.cp,
1742 portage.dep_getkey(virt_choice), 1))
1743 pprovided = pprovideddict.get(expanded_atom.cp)
1745 portage.match_from_list(expanded_atom, pprovided):
1746 # A provided package has been
1747 # specified on the command line.
1748 self._dynamic_config._pprovided_args.append((arg, atom))
1749 pprovided_match = True
# Unsatisfied atoms from system/world are tolerated (recorded in
# _missing_args); anything else aborts the resolve.
1754 if not (isinstance(arg, SetArg) and \
1755 arg.name in ("system", "world")):
1756 self._dynamic_config._unsatisfied_deps_for_display.append(
1757 ((myroot, atom), {}))
1758 return 0, myfavorites
1759 self._dynamic_config._missing_args.append((arg, atom))
1761 if atom.cp != pkg.cp:
1762 # For old-style virtuals, we need to repeat the
1763 # package.provided check against the selected package.
1764 expanded_atom = atom.replace(atom.cp, pkg.cp)
1765 pprovided = pprovideddict.get(pkg.cp)
1767 portage.match_from_list(expanded_atom, pprovided):
1768 # A provided package has been
1769 # specified on the command line.
1770 self._dynamic_config._pprovided_args.append((arg, atom))
1772 if pkg.installed and "selective" not in self._dynamic_config.myparams:
1773 self._dynamic_config._unsatisfied_deps_for_display.append(
1774 ((myroot, atom), {}))
1775 # Previous behavior was to bail out in this case, but
1776 # since the dep is satisfied by the installed package,
1777 # it's more friendly to continue building the graph
1778 # and just show a warning message. Therefore, only bail
1779 # out here if the atom is not from either the system or
1781 if not (isinstance(arg, SetArg) and \
1782 arg.name in ("system", "world")):
1783 return 0, myfavorites
1785 # Add the selected package to the graph as soon as possible
1786 # so that later dep_check() calls can use it as feedback
1787 # for making more consistent atom selections.
1788 if not self._add_pkg(pkg, dep):
1789 if self._dynamic_config._need_restart:
1791 elif isinstance(arg, SetArg):
1792 sys.stderr.write(("\n\n!!! Problem resolving " + \
1793 "dependencies for %s from %s\n") % \
1796 sys.stderr.write(("\n\n!!! Problem resolving " + \
1797 "dependencies for %s\n") % atom)
1798 return 0, myfavorites
# Signature failures abort immediately with an explanation.
1800 except portage.exception.MissingSignature as e:
1801 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1802 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1803 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1804 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1805 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1806 return 0, myfavorites
1807 except portage.exception.InvalidSignature as e:
1808 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1809 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1810 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1811 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1812 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1813 return 0, myfavorites
1814 except SystemExit as e:
1815 raise # Needed else can't exit
1816 except Exception as e:
1817 print("\n\n!!! Problem in '%s' dependencies." % atom, file=sys.stderr)
1818 print("!!!", str(e), getattr(e, "__module__", None), file=sys.stderr)
1821 # Now that the root packages have been added to the graph,
1822 # process the dependencies.
1823 if not self._create_graph():
1824 return 0, myfavorites
# --usepkgonly: verify every merge node is backed by a binary package.
1827 if "--usepkgonly" in self._frozen_config.myopts:
1828 for xs in self._dynamic_config.digraph.all_nodes():
1829 if not isinstance(xs, Package):
1831 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1835 print("Missing binary for:",xs[2])
1839 except self._unknown_internal_error:
1840 return False, myfavorites
1842 # We're true here unless we are missing binaries.
1843 return (not missing,myfavorites)
# Rebuild the "args" package set, the flattened _set_atoms collection, and
# the (atom, root) -> args mapping from the given argument list, then
# invalidate package-selection caches.
# NOTE(review): lines 1846/1851 (docstring delimiters), 1853-1854,
# 1856-1857, 1859-1861, 1866, 1870-1875 are elided from this dump.
1845 def _set_args(self, args):
1847 Create the "args" package set from atoms and packages given as
1848 arguments. This method can be called multiple times if necessary.
1849 The package selection cache is automatically invalidated, since
1850 arguments influence package selections.
1852 args_set = self._dynamic_config._sets["args"]
1855 if not isinstance(arg, (AtomArg, PackageArg)):
1858 if atom in args_set:
1862 self._dynamic_config._set_atoms.clear()
1863 self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
1864 atom_arg_map = self._dynamic_config._atom_arg_map
1865 atom_arg_map.clear()
1867 for atom in arg.set:
1868 atom_key = (atom, arg.root_config.root)
1869 refs = atom_arg_map.get(atom_key)
1872 atom_arg_map[atom_key] = refs
1876 # Invalidate the package selection cache, since
1877 # arguments influence package selections.
1878 self._dynamic_config._highest_pkg_cache.clear()
1879 for trees in self._dynamic_config._filtered_trees.values():
1880 trees["porttree"].dbapi._clear_cache()
# Compute greedy slot atoms per the docstring below; with
# blocker_lookahead, drop slots whose packages block (or are blocked by)
# the highest visible match or each other.
# NOTE(review): lines 1883/1889 (docstring delimiters), 1892, 1894, 1899,
# and several others are elided from this dump.
1882 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
1884 Return a list of slot atoms corresponding to installed slots that
1885 differ from the slot of the highest visible match. When
1886 blocker_lookahead is True, slot atoms that would trigger a blocker
1887 conflict are automatically discarded, potentially allowing automatic
1888 uninstallation of older slots when appropriate.
1890 highest_pkg, in_graph = self._select_package(root_config.root, atom)
1891 if highest_pkg is None:
1893 vardb = root_config.trees["vartree"].dbapi
# Collect SLOT values of installed matches, excluding old/new virtual
# mismatches, then drop the slot of the highest visible match itself.
1895 for cpv in vardb.match(atom):
1896 # don't mix new virtuals with old virtuals
1897 if portage.cpv_getkey(cpv) == highest_pkg.cp:
1898 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
1900 slots.add(highest_pkg.metadata["SLOT"])
1904 slots.remove(highest_pkg.metadata["SLOT"])
1907 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1908 pkg, in_graph = self._select_package(root_config.root, slot_atom)
1909 if pkg is not None and \
1910 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
1911 greedy_pkgs.append(pkg)
1914 if not blocker_lookahead:
1915 return [pkg.slot_atom for pkg in greedy_pkgs]
# Build per-package blocker-atom sets from DEPEND/PDEPEND/RDEPEND.
1918 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1919 for pkg in greedy_pkgs + [highest_pkg]:
1920 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1922 selected_atoms = self._select_atoms(
1923 pkg.root, dep_str, pkg.use.enabled,
1924 parent=pkg, strict=True)
1925 except portage.exception.InvalidDependString:
1928 for atoms in selected_atoms.values():
1929 blocker_atoms.extend(x for x in atoms if x.blocker)
1930 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
1932 if highest_pkg not in blockers:
1935 # filter packages with invalid deps
1936 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
1938 # filter packages that conflict with highest_pkg
1939 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
1940 (blockers[highest_pkg].findAtomForPackage(pkg) or \
1941 blockers[pkg].findAtomForPackage(highest_pkg))]
1946 # If two packages conflict, discard the lower version.
1947 discard_pkgs = set()
1948 greedy_pkgs.sort(reverse=True)
1949 for i in range(len(greedy_pkgs) - 1):
1950 pkg1 = greedy_pkgs[i]
1951 if pkg1 in discard_pkgs:
1953 for j in range(i + 1, len(greedy_pkgs)):
1954 pkg2 = greedy_pkgs[j]
1955 if pkg2 in discard_pkgs:
1957 if blockers[pkg1].findAtomForPackage(pkg2) or \
1958 blockers[pkg2].findAtomForPackage(pkg1):
1960 discard_pkgs.add(pkg2)
1962 return [pkg.slot_atom for pkg in greedy_pkgs \
1963 if pkg not in discard_pkgs]
# Delegate to _select_atoms_highest_available, but with trees forced to
# the graph trees so already-graphed / installed packages are preferred.
# NOTE(review): lines 1966 and 1970 (docstring delimiters) are elided.
1965 def _select_atoms_from_graph(self, *pargs, **kwargs):
1967 Prefer atoms matching packages that have already been
1968 added to the graph or those that are installed and have
1969 not been scheduled for replacement.
1971 kwargs["trees"] = self._dynamic_config._graph_trees
1972 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check over depstring and translate its chosen atoms,
# using the atom_graph side channel, into a {package: [atoms]} mapping
# keyed by parent (and by any indirect virtual packages).
# NOTE(review): lines 1979, 1982-1983, 1989, 1994, 2001, 2003, 2005,
# 2010-2013, 2016, 2022-2024, 2027 are elided from this dump.
1974 def _select_atoms_highest_available(self, root, depstring,
1975 myuse=None, parent=None, strict=True, trees=None, priority=None):
1976 """This will raise InvalidDependString if necessary. If trees is
1977 None then self._dynamic_config._filtered_trees is used."""
1978 pkgsettings = self._frozen_config.pkgsettings[root]
1980 trees = self._dynamic_config._filtered_trees
1981 atom_graph = digraph()
# Pass parent/atom_graph/priority to dep_check via the trees dict; the
# entries are removed again in the cleanup below (lines 1996-1999).
1984 if parent is not None:
1985 trees[root]["parent"] = parent
1986 trees[root]["atom_graph"] = atom_graph
1987 if priority is not None:
1988 trees[root]["priority"] = priority
1990 portage.dep._dep_check_strict = False
1991 mycheck = portage.dep_check(depstring, None,
1992 pkgsettings, myuse=myuse,
1993 myroot=root, trees=trees)
1995 if parent is not None:
1996 trees[root].pop("parent")
1997 trees[root].pop("atom_graph")
1998 if priority is not None:
1999 trees[root].pop("priority")
2000 portage.dep._dep_check_strict = True
2002 raise portage.exception.InvalidDependString(mycheck[1])
2004 selected_atoms = mycheck[1]
2006 chosen_atoms = frozenset(mycheck[1])
2007 selected_atoms = {parent : []}
2008 for node in atom_graph:
2009 if isinstance(node, Atom):
# Non-Atom nodes are (pkg, virt_atom) pairs for indirect virtuals.
2014 pkg, virt_atom = node
2015 if virt_atom not in chosen_atoms:
2017 if not portage.match_from_list(virt_atom, [pkg]):
2018 # Typically this means that the atom
2019 # specifies USE deps that are unsatisfied
2020 # by the selected package. The caller will
2021 # record this as an unsatisfied dependency
2025 selected_atoms[pkg] = [atom for atom in \
2026 atom_graph.child_nodes(node) if atom in chosen_atoms]
2028 return selected_atoms
# Explain to the user why no package satisfies `atom`: collects masked
# candidates and USE/IUSE mismatches from every configured db, prints a
# diagnostic, then walks the digraph upward to show which parents (and
# ultimately which argument) pulled the dependency in.
# NOTE(review): many interior lines are elided from this excerpt
# (various continue/else/guard lines, e.g. orig. 2085, 2091-2093,
# 2104-2109, 2171-2172, 2191) — verify against upstream before editing.
2030 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2031 check_backtrack=False):
2033 When check_backtrack=True, no output is produced and
2034 the method either returns or raises _backtrack_mask if
2035 a matching package has been masked by backtracking.
2037 backtrack_mask = False
2038 atom_set = InternalPackageSet(initial_atoms=(atom,))
2039 xinfo = '"%s"' % atom
2042 # Discard null/ from failed cpv_expand category expansion.
2043 xinfo = xinfo.replace("null/", "")
2044 masked_packages = []
2046 masked_pkg_instances = set()
2047 missing_licenses = []
2048 have_eapi_mask = False
2049 pkgsettings = self._frozen_config.pkgsettings[root]
2050 implicit_iuse = pkgsettings._get_implicit_iuse()
2051 root_config = self._frozen_config.roots[root]
2052 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2053 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Gather every candidate cpv (ignoring USE deps) and classify the
# reasons each one was rejected.
2054 for db, pkg_type, built, installed, db_keys in dbs:
2058 if hasattr(db, "xmatch"):
2059 cpv_list = db.xmatch("match-all", atom.without_use)
2061 cpv_list = db.match(atom.without_use)
2064 for cpv in cpv_list:
2065 metadata, mreasons = get_mask_info(root_config, cpv,
2066 pkgsettings, db, pkg_type, built, installed, db_keys)
2067 if metadata is not None:
2068 pkg = self._pkg(cpv, pkg_type, root_config,
2069 installed=installed)
2070 if pkg.cp != atom.cp:
2071 # A cpv can be returned from dbapi.match() as an
2072 # old-style virtual match even in cases when the
2073 # package does not actually PROVIDE the virtual.
2074 # Filter out any such false matches here.
2075 if not atom_set.findAtomForPackage(pkg):
2077 if pkg in self._dynamic_config._runtime_pkg_mask:
2078 backtrack_reasons = \
2079 self._dynamic_config._runtime_pkg_mask[pkg]
2080 mreasons.append('backtracking: %s' % \
2081 ', '.join(sorted(backtrack_reasons)))
2082 backtrack_mask = True
2084 masked_pkg_instances.add(pkg)
# missing_use is appended to here but its initialization
# (presumably near orig. 2045) is elided from this excerpt.
2086 missing_use.append(pkg)
2089 masked_packages.append(
2090 (root_config, pkgsettings, cpv, metadata, mreasons))
# check_backtrack mode: signal the caller instead of printing.
2094 raise self._backtrack_mask()
# Separate "needs different USE flags" from "missing IUSE entirely".
2098 missing_use_reasons = []
2099 missing_iuse_reasons = []
2100 for pkg in missing_use:
2101 use = pkg.use.enabled
2102 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2103 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2105 for x in atom.use.required:
2106 if iuse_re.match(x) is None:
2107 missing_iuse.append(x)
2110 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2111 missing_iuse_reasons.append((pkg, mreasons))
2113 need_enable = sorted(atom.use.enabled.difference(use))
2114 need_disable = sorted(atom.use.disabled.intersection(use))
2115 if need_enable or need_disable:
2117 changes.extend(colorize("red", "+" + x) \
2118 for x in need_enable)
2119 changes.extend(colorize("blue", "-" + x) \
2120 for x in need_disable)
2121 mreasons.append("Change USE: %s" % " ".join(changes))
2122 missing_use_reasons.append((pkg, mreasons))
# Only unmasked packages are worth suggesting USE changes for.
2124 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2125 in missing_use_reasons if pkg not in masked_pkg_instances]
2127 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2128 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2130 show_missing_use = False
2131 if unmasked_use_reasons:
2132 # Only show the latest version.
2133 show_missing_use = unmasked_use_reasons[:1]
2134 elif unmasked_iuse_reasons:
2135 if missing_use_reasons:
2136 # All packages with required IUSE are masked,
2137 # so display a normal masking message.
2140 show_missing_use = unmasked_iuse_reasons
# Three diagnostic variants: USE problem, masked packages, or no
# ebuilds at all.
2142 if show_missing_use:
2143 print("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".")
2144 print("!!! One of the following packages is required to complete your request:")
2145 for pkg, mreasons in show_missing_use:
2146 print("- "+pkg.cpv+" ("+", ".join(mreasons)+")")
2148 elif masked_packages:
2150 colorize("BAD", "All ebuilds that could satisfy ") + \
2151 colorize("INFORM", xinfo) + \
2152 colorize("BAD", " have been masked."))
2153 print("!!! One of the following masked packages is required to complete your request:")
2154 have_eapi_mask = show_masked_packages(masked_packages)
# EAPI mask implies this portage version is too old for the ebuild.
2157 msg = ("The current version of portage supports " + \
2158 "EAPI '%s'. You must upgrade to a newer version" + \
2159 " of portage before EAPI masked packages can" + \
2160 " be installed.") % portage.const.EAPI
2161 from textwrap import wrap
2162 for line in wrap(msg, 75):
2167 print("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".")
2169 # Show parent nodes and the argument that pulled them in.
2170 traversed_nodes = set()
# node presumably starts at myparent (orig. 2171-2172 elided).
2173 while node is not None:
2174 traversed_nodes.add(node)
2175 msg.append('(dependency required by "%s" [%s])' % \
2176 (colorize('INFORM', str(node.cpv)), node.type_name))
2178 if node not in self._dynamic_config.digraph:
2179 # The parent is not in the graph due to backtracking.
2182 # When traversing to parents, prefer arguments over packages
2183 # since arguments are root nodes. Never traverse the same
2184 # package twice, in order to prevent an infinite loop.
2185 selected_parent = None
2186 for parent in self._dynamic_config.digraph.parent_nodes(node):
2187 if isinstance(parent, DependencyArg):
2188 msg.append('(dependency required by "%s" [argument])' % \
2189 (colorize('INFORM', str(parent))))
# An argument terminates the walk (selected_parent stays None).
2190 selected_parent = None
2192 if parent not in traversed_nodes:
2193 selected_parent = parent
2194 node = selected_parent
# Generator over Package instances of pkg_type matching `atom` in the
# appropriate db, with a workaround for USE=multislot slot mismatches
# on installed packages.
# NOTE(review): elided excerpt — the docstring delimiters, the else:
# branches, several continue/break lines, and the final "yield pkg"
# (presumably orig. 2269-2270) are missing. Verify against upstream.
2200 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2202 Iterate over Package instances of pkg_type matching the given atom.
2203 This does not check visibility and it also does not match USE for
2204 unbuilt ebuilds since USE are lazily calculated after visibility
2205 checks (to avoid the expense when possible).
2208 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
# xmatch ("match-all") is the porttree fast path; plain match()
# otherwise.
2210 if hasattr(db, "xmatch"):
2211 cpv_list = db.xmatch("match-all", atom)
2213 cpv_list = db.match(atom)
2215 # USE=multislot can make an installed package appear as if
2216 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2217 # won't do any good as long as USE=multislot is enabled since
2218 # the newly built package still won't have the expected slot.
2219 # Therefore, assume that such SLOT dependencies are already
2220 # satisfied rather than forcing a rebuild.
2221 installed = pkg_type == 'installed'
2222 if installed and not cpv_list and atom.slot:
2223 for cpv in db.match(atom.cp):
2224 slot_available = False
2225 for other_db, other_type, other_built, \
2226 other_installed, other_keys in \
2227 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
# Comparison operand on orig. line 2229 (presumably
# "if atom.slot ==") is elided here.
2230 other_db.aux_get(cpv, ["SLOT"])[0]:
2231 slot_available = True
2235 if not slot_available:
2237 inst_pkg = self._pkg(cpv, "installed",
2238 root_config, installed=installed)
2239 # Remove the slot from the atom and verify that
2240 # the package matches the resulting atom.
2241 atom_without_slot = portage.dep.remove_slot(atom)
2243 atom_without_slot += str(atom.use)
2244 atom_without_slot = portage.dep.Atom(atom_without_slot)
2245 if portage.match_from_list(
2246 atom_without_slot, [inst_pkg]):
# Treat the slot dep as satisfied by this instance.
2247 cpv_list = [inst_pkg.cpv]
2254 for cpv in cpv_list:
2256 pkg = self._pkg(cpv, pkg_type, root_config,
2257 installed=installed, onlydeps=onlydeps)
2258 except portage.exception.PackageNotFound:
2261 if pkg.cp != atom.cp:
2262 # A cpv can be returned from dbapi.match() as an
2263 # old-style virtual match even in cases when the
2264 # package does not actually PROVIDE the virtual.
2265 # Filter out any such false matches here.
2266 if not InternalPackageSet(initial_atoms=(atom,)
2267 ).findAtomForPackage(pkg):
# Cached front-end for _select_pkg_highest_available_imp(): memoizes
# (pkg, existing_node) per (root, atom, onlydeps) and records visible
# selections in _visible_pkgs.
# NOTE(review): elided excerpt — the cache-hit unpack/return (orig.
# 2274-2275, 2281, 2283) and the final return (orig. 2286-2287, 2292-
# 2293) are missing. Verify against upstream before editing.
2271 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2272 cache_key = (root, atom, onlydeps)
2273 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
# Cache hit path: refresh the "existing" half if the package has
# since been added to the graph.
2276 if pkg and not existing:
2277 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2278 if existing and existing == pkg:
2279 # Update the cache to reflect that the
2280 # package has been added to the graph.
2282 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
2284 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2285 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Track visible, non-keyword-masked selections for later use.
2288 settings = pkg.root_config.settings
2289 if visible(settings, pkg) and not (pkg.installed and \
2290 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2291 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# Core package selection: scan every configured db (in type-preference
# order) for packages matching `atom`, apply visibility / USE /
# reinstall / --noreplace policy, and return the chosen
# (pkg, existing_node) pair.
# NOTE(review): this excerpt is heavily elided — many guard lines,
# continue/break statements, else: branches and some assignments
# (e.g. "reinstall", "cpv", "atom_cp", "myeb", the final empty-match
# return around orig. 2508) are missing. Treat as reference only;
# verify every edit against upstream.
2294 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2295 root_config = self._frozen_config.roots[root]
2296 pkgsettings = self._frozen_config.pkgsettings[root]
2297 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2298 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2299 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2300 # List of acceptable packages, ordered by type preference.
2301 matched_packages = []
2302 highest_version = None
2303 if not isinstance(atom, portage.dep.Atom):
2304 atom = portage.dep.Atom(atom)
2306 atom_set = InternalPackageSet(initial_atoms=(atom,))
2307 existing_node = None
# Option / parameter flags consulted throughout the scan below.
2309 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2310 empty = "empty" in self._dynamic_config.myparams
2311 selective = "selective" in self._dynamic_config.myparams
2313 noreplace = "--noreplace" in self._frozen_config.myopts
2314 avoid_update = "--update" not in self._frozen_config.myopts
2315 # Behavior of the "selective" parameter depends on
2316 # whether or not a package matches an argument atom.
2317 # If an installed package provides an old-style
2318 # virtual that is no longer provided by an available
2319 # package, the installed package may match an argument
2320 # atom even though none of the available packages do.
2321 # Therefore, "selective" logic does not consider
2322 # whether or not an installed package matches an
2323 # argument atom. It only considers whether or not
2324 # available packages match argument atoms, which is
2325 # represented by the found_available_arg flag.
2326 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then any.
2327 for find_existing_node in True, False:
2330 for db, pkg_type, built, installed, db_keys in dbs:
2333 if installed and not find_existing_node:
2334 want_reinstall = reinstall or empty or \
2335 (found_available_arg and not selective)
2336 if want_reinstall and matched_packages:
2339 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom,
2341 if pkg in self._dynamic_config._runtime_pkg_mask:
2342 # The package has been masked by the backtracking logic
2345 # Make --noreplace take precedence over --newuse.
2346 if not pkg.installed and noreplace and \
2347 cpv in vardb.match(atom):
2348 # If the installed version is masked, it may
2349 # be necessary to look at lower versions,
2350 # in case there is a visible downgrade.
2352 reinstall_for_flags = None
2354 if not pkg.installed or \
2355 (matched_packages and not avoid_update):
2356 # Only enforce visibility on installed packages
2357 # if there is at least one other visible package
2358 # available. By filtering installed masked packages
2359 # here, packages that have been masked since they
2360 # were installed can be automatically downgraded
2361 # to an unmasked version.
2363 if not visible(pkgsettings, pkg):
2365 except portage.exception.InvalidDependString:
2369 # Enable upgrade or downgrade to a version
2370 # with visible KEYWORDS when the installed
2371 # version is masked by KEYWORDS, but never
2372 # reinstall the same exact version only due
2373 # to a KEYWORDS mask. See bug #252167.
2374 if matched_packages:
2376 different_version = None
2377 for avail_pkg in matched_packages:
2378 if not portage.dep.cpvequal(
2379 pkg.cpv, avail_pkg.cpv):
2380 different_version = avail_pkg
2382 if different_version is not None:
2383 # If the ebuild no longer exists or it's
2384 # keywords have been dropped, reject built
2385 # instances (installed or binary).
2386 # If --usepkgonly is enabled, assume that
2387 # the ebuild status should be ignored.
2390 pkgsettings._getMissingKeywords(
2391 pkg.cpv, pkg.metadata):
# Look up the corresponding ebuild; skipped
# entirely under --usepkgonly (context elided).
2396 pkg.cpv, "ebuild", root_config)
2397 except portage.exception.PackageNotFound:
2400 if not visible(pkgsettings, pkg_eb):
2403 # Calculation of USE for unbuilt ebuilds is relatively
2404 # expensive, so it is only performed lazily, after the
2405 # above visibility checks are complete.
2408 if root == self._frozen_config.target_root:
2410 myarg = next(self._iter_atoms_for_pkg(pkg))
2411 except StopIteration:
2413 except portage.exception.InvalidDependString:
2415 # masked by corruption
2417 if not installed and myarg:
2418 found_available_arg = True
# Reject unbuilt packages whose USE conflicts with the atom.
2420 if atom.use and not pkg.built:
2421 use = pkg.use.enabled
2422 if atom.use.enabled.difference(use):
2424 if atom.use.disabled.intersection(use):
2426 if pkg.cp == atom_cp:
2427 if highest_version is None:
2428 highest_version = pkg
2429 elif pkg > highest_version:
2430 highest_version = pkg
2431 # At this point, we've found the highest visible
2432 # match from the current repo. Any lower versions
2433 # from this repo are ignored, so this so the loop
2434 # will always end with a break statement below
2436 if find_existing_node:
2437 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2440 # Use PackageSet.findAtomForPackage()
2441 # for PROVIDE support.
2442 if atom_set.findAtomForPackage(e_pkg):
2443 if highest_version and \
2444 e_pkg.cp == atom_cp and \
2445 e_pkg < highest_version and \
2446 e_pkg.slot_atom != highest_version.slot_atom:
2447 # There is a higher version available in a
2448 # different slot, so this existing node is
# ... irrelevant (continuation elided).
2452 matched_packages.append(e_pkg)
2453 existing_node = e_pkg
2455 # Compare built package to current config and
2456 # reject the built package if necessary.
2457 if built and not installed and \
2458 ("--newuse" in self._frozen_config.myopts or \
2459 "--reinstall" in self._frozen_config.myopts or \
2460 "--binpkg-respect-use" in self._frozen_config.myopts):
2461 iuses = pkg.iuse.all
2462 old_use = pkg.use.enabled
2464 pkgsettings.setcpv(myeb)
2466 pkgsettings.setcpv(pkg)
2467 now_use = pkgsettings["PORTAGE_USE"].split()
2468 forced_flags = set()
2469 forced_flags.update(pkgsettings.useforce)
2470 forced_flags.update(pkgsettings.usemask)
2472 if myeb and not usepkgonly:
2473 cur_iuse = myeb.iuse.all
2474 if self._reinstall_for_flags(forced_flags,
2478 # Compare current config to installed package
2479 # and do not reinstall if possible.
2480 if not installed and \
2481 ("--newuse" in self._frozen_config.myopts or \
2482 "--reinstall" in self._frozen_config.myopts) and \
2483 cpv in vardb.match(atom):
2484 pkgsettings.setcpv(pkg)
2485 forced_flags = set()
2486 forced_flags.update(pkgsettings.useforce)
2487 forced_flags.update(pkgsettings.usemask)
2488 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
2489 old_iuse = set(filter_iuse_defaults(
2490 vardb.aux_get(cpv, ["IUSE"])[0].split()))
2491 cur_use = pkg.use.enabled
2492 cur_iuse = pkg.iuse.all
2493 reinstall_for_flags = \
2494 self._reinstall_for_flags(
2495 forced_flags, old_use, old_iuse,
2497 if reinstall_for_flags:
2501 matched_packages.append(pkg)
2502 if reinstall_for_flags:
2503 self._dynamic_config._reinstall_nodes[pkg] = \
# Post-scan result selection.
2507 if not matched_packages:
2510 if "--debug" in self._frozen_config.myopts:
2511 for pkg in matched_packages:
2512 portage.writemsg("%s %s\n" % \
2513 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
2515 # Filter out any old-style virtual matches if they are
2516 # mixed with new-style virtual matches.
2518 if len(matched_packages) > 1 and \
2519 "virtual" == portage.catsplit(cp)[0]:
2520 for pkg in matched_packages:
2523 # Got a new-style virtual, so filter
2524 # out any old-style virtuals.
2525 matched_packages = [pkg for pkg in matched_packages \
2529 if len(matched_packages) > 1:
2531 if existing_node is not None:
2532 return existing_node, existing_node
2533 for pkg in matched_packages:
2535 return pkg, existing_node
# Tie-break among remaining candidates by best version.
2537 bestmatch = portage.best(
2538 [pkg.cpv for pkg in matched_packages])
2539 matched_packages = [pkg for pkg in matched_packages \
2540 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
2542 # ordered by type preference ("ebuild" type is the last resort)
2543 return matched_packages[-1], existing_node
# Graph-restricted package selection: installed as self._select_package
# by _complete_graph(). Matches against the graph-backed dbapi, so only
# already-graphed or installed-and-kept packages are returned.
# NOTE(review): elided excerpt — docstring delimiters and the empty-
# match branch (presumably orig. 2553-2554) are missing.
2545 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
2547 Select packages that have already been added to the graph or
2548 those that are installed and have not been scheduled for
2551 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
2552 matches = graph_db.match_pkgs(atom)
2555 pkg = matches[-1] # highest match
2556 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2557 return pkg, in_graph
# Deep-completes the graph against required sets so that an upgrade
# does not silently break initially-satisfied deep dependencies.
# Temporarily swaps self._select_atoms/_select_package for the graph-
# restricted variants and forces "deep" traversal.
# NOTE(review): elided excerpt — several early "return 1" lines, the
# "args = []" initialization, the loop header that re-fills the dep
# stack, and some continue lines are missing. Verify against upstream.
2559 def _complete_graph(self):
2561 Add any deep dependencies of required sets (args, system, world) that
2562 have not been pulled into the graph yet. This ensures that the graph
2563 is consistent such that initially satisfied deep dependencies are not
2564 broken in the new graph. Initially unsatisfied dependencies are
2565 irrelevant since we only want to avoid breaking dependencies that are
2568 Since this method can consume enough time to disturb users, it is
2569 currently only enabled by the --complete-graph option.
2571 if "--buildpkgonly" in self._frozen_config.myopts or \
2572 "recurse" not in self._dynamic_config.myparams:
2575 if "complete" not in self._dynamic_config.myparams:
2576 # Skip this to avoid consuming enough time to disturb users.
2579 # Put the depgraph into a mode that causes it to only
2580 # select packages that have already been added to the
2581 # graph or those that are installed and have not been
2582 # scheduled for replacement. Also, toggle the "deep"
2583 # parameter so that all dependencies are traversed and
2585 self._select_atoms = self._select_atoms_from_graph
2586 self._select_package = self._select_pkg_from_graph
2587 already_deep = self._dynamic_config.myparams.get("deep") is True
2588 if not already_deep:
2589 self._dynamic_config.myparams["deep"] = True
2591 for root in self._frozen_config.roots:
2592 required_set_names = self._frozen_config._required_set_names.copy()
2593 if root == self._frozen_config.target_root and \
2594 (already_deep or "empty" in self._dynamic_config.myparams):
2595 required_set_names.difference_update(self._dynamic_config._sets)
2596 if not required_set_names and not self._dynamic_config._ignored_deps:
2598 root_config = self._frozen_config.roots[root]
2599 setconfig = root_config.setconfig
2601 # Reuse existing SetArg instances when available.
2602 for arg in self._dynamic_config.digraph.root_nodes():
2603 if not isinstance(arg, SetArg):
2605 if arg.root_config != root_config:
2607 if arg.name in required_set_names:
# Presumably args.append(arg) here (orig. 2608 elided).
2609 required_set_names.remove(arg.name)
2610 # Create new SetArg instances only when necessary.
2611 for s in required_set_names:
2612 expanded_set = InternalPackageSet(
2613 initial_atoms=setconfig.getSetAtoms(s))
2614 atom = SETPREFIX + s
2615 args.append(SetArg(arg=atom, set=expanded_set,
2616 root_config=root_config))
2617 vardb = root_config.trees["vartree"].dbapi
# Push every set atom onto the dep stack for resolution.
2619 for atom in arg.set:
2620 self._dynamic_config._dep_stack.append(
2621 Dependency(atom=atom, root=root, parent=arg))
2622 if self._dynamic_config._ignored_deps:
2623 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
2624 self._dynamic_config._ignored_deps = []
2625 if not self._create_graph(allow_unsatisfied=True):
2627 # Check the unsatisfied deps to see if any initially satisfied deps
2628 # will become unsatisfied due to an upgrade. Initially unsatisfied
2629 # deps are irrelevant since we only want to avoid breaking deps
2630 # that are initially satisfied.
2631 while self._dynamic_config._unsatisfied_deps:
2632 dep = self._dynamic_config._unsatisfied_deps.pop()
2633 matches = vardb.match_pkgs(dep.atom)
2635 self._dynamic_config._initially_unsatisfied_deps.append(dep)
2637 # An scheduled installation broke a deep dependency.
2638 # Add the installed package to the graph so that it
2639 # will be appropriately reported as a slot collision
2640 # (possibly solvable via backtracking).
2641 pkg = matches[-1] # highest match
2642 if not self._add_pkg(pkg, dep):
2644 if not self._create_graph(allow_unsatisfied=True):
# Package-instance factory with caching: look up (type, root, cpv,
# operation) in the frozen-config cache, falling back to building a
# fresh Package from db metadata.
# NOTE(review): elided excerpt — docstring tail, the default
# operation assignment (presumably "merge"), the "pkg is None" guard,
# the try/except around aux_get, and the final "return pkg" are
# missing. Verify against upstream before editing.
2648 def _pkg(self, cpv, type_name, root_config, installed=False,
2651 Get a package instance from the cache, or create a new
2652 one if necessary. Raises PackageNotFound from aux_get if it
2653 failures for some reason (package does not exist or is
2657 if installed or onlydeps:
2658 operation = "nomerge"
2659 pkg = self._frozen_config._pkg_cache.get(
2660 (type_name, root_config.root, cpv, operation))
2661 if pkg is None and onlydeps and not installed:
2662 # Maybe it already got pulled in as a "merge" node.
2663 pkg = self._dynamic_config.mydbapi[root_config.root].get(
2664 (type_name, root_config.root, cpv, 'merge'))
# Cache miss: build the Package from the backing db's metadata.
2667 tree_type = self.pkg_tree_map[type_name]
2668 db = root_config.trees[tree_type].dbapi
2669 db_keys = list(self._frozen_config._trees_orig[root_config.root][
2670 tree_type].dbapi._aux_cache_keys)
2672 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
2674 raise portage.exception.PackageNotFound(cpv)
2675 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
2676 installed=installed, metadata=metadata, onlydeps=onlydeps,
2677 root_config=root_config, type_name=type_name)
2678 self._frozen_config._pkg_cache[pkg] = pkg
# Blocker validation pass: drop blockers that match nothing in the
# graph, and for real conflicts add hard "uninstall before install"
# ordering edges (or record the blocker as unsolvable).
# NOTE(review): heavily elided excerpt — early returns, continue/break
# lines, try: headers, else: branches and some assignments are missing
# throughout (see gaps in the embedded numbering). Verify every edit
# against upstream before relying on this listing.
2681 def _validate_blockers(self):
2682 """Remove any blockers from the digraph that do not match any of the
2683 packages within the graph. If necessary, create hard deps to ensure
2684 correct merge order such that mutually blocking packages are never
2685 installed simultaneously."""
2687 if "--buildpkgonly" in self._frozen_config.myopts or \
2688 "--nodeps" in self._frozen_config.myopts:
2691 #if "deep" in self._dynamic_config.myparams:
2693 # Pull in blockers from all installed packages that haven't already
2694 # been pulled into the depgraph. This is not enabled by default
2695 # due to the performance penalty that is incurred by all the
2696 # additional dep_check calls that are required.
2698 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
2699 for myroot in self._frozen_config.trees:
2700 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
2701 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
2702 pkgsettings = self._frozen_config.pkgsettings[myroot]
2703 final_db = self._dynamic_config.mydbapi[myroot]
# stale_cache tracks cached cpvs not seen this pass; the
# leftovers are purged below (orig. 2821-2822).
2705 blocker_cache = BlockerCache(myroot, vardb)
2706 stale_cache = set(blocker_cache)
2709 stale_cache.discard(cpv)
2710 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
2712 # Check for masked installed packages. Only warn about
2713 # packages that are in the graph in order to avoid warning
2714 # about those that will be automatically uninstalled during
2715 # the merge process or by --depclean.
2717 if pkg_in_graph and not visible(pkgsettings, pkg):
2718 self._dynamic_config._masked_installed.add(pkg)
2720 blocker_atoms = None
# Collect graph-known blockers for this pkg, if any
# (surrounding try/except elided).
2726 self._dynamic_config._blocker_parents.child_nodes(pkg))
2731 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
2734 if blockers is not None:
2735 blockers = set(blocker.atom for blocker in blockers)
2737 # If this node has any blockers, create a "nomerge"
2738 # node for it so that they can be enforced.
2739 self._spinner_update()
2740 blocker_data = blocker_cache.get(cpv)
# COUNTER mismatch invalidates the cached entry.
2741 if blocker_data is not None and \
2742 blocker_data.counter != long(pkg.metadata["COUNTER"]):
2745 # If blocker data from the graph is available, use
2746 # it to validate the cache and update the cache if
2748 if blocker_data is not None and \
2749 blockers is not None:
2750 if not blockers.symmetric_difference(
2751 blocker_data.atoms):
2755 if blocker_data is None and \
2756 blockers is not None:
2757 # Re-use the blockers from the graph.
2758 blocker_atoms = sorted(blockers)
2759 counter = long(pkg.metadata["COUNTER"])
2761 blocker_cache.BlockerData(counter, blocker_atoms)
2762 blocker_cache[pkg.cpv] = blocker_data
2766 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
2768 # Use aux_get() to trigger FakeVartree global
2769 # updates on *DEPEND when appropriate.
2770 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2771 # It is crucial to pass in final_db here in order to
2772 # optimize dep_check calls by eliminating atoms via
2773 # dep_wordreduce and dep_eval calls.
2775 portage.dep._dep_check_strict = False
2777 success, atoms = portage.dep_check(depstr,
2778 final_db, pkgsettings, myuse=pkg.use.enabled,
2779 trees=self._dynamic_config._graph_trees, myroot=myroot)
2780 except Exception as e:
2781 if isinstance(e, SystemExit):
2783 # This is helpful, for example, if a ValueError
2784 # is thrown from cpv_expand due to multiple
2785 # matches (this can happen if an atom lacks a
2787 show_invalid_depstring_notice(
2788 pkg, depstr, str(e))
2792 portage.dep._dep_check_strict = True
2794 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2795 if replacement_pkg and \
2796 replacement_pkg[0].operation == "merge":
2797 # This package is being replaced anyway, so
2798 # ignore invalid dependencies so as not to
2799 # annoy the user too much (otherwise they'd be
2800 # forced to manually unmerge it first).
2802 show_invalid_depstring_notice(pkg, depstr, atoms)
# Keep only the blocker atoms from the dep_check result
# (the filter condition on orig. 2805 is elided).
2804 blocker_atoms = [myatom for myatom in atoms \
2806 blocker_atoms.sort()
2807 counter = long(pkg.metadata["COUNTER"])
2808 blocker_cache[cpv] = \
2809 blocker_cache.BlockerData(counter, blocker_atoms)
2812 for atom in blocker_atoms:
2813 blocker = Blocker(atom=atom,
2814 eapi=pkg.metadata["EAPI"], root=myroot)
2815 self._dynamic_config._blocker_parents.add(blocker, pkg)
2816 except portage.exception.InvalidAtom as e:
2817 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2818 show_invalid_depstring_notice(
2819 pkg, depstr, "Invalid Atom: %s" % (e,))
2821 for cpv in stale_cache:
2822 del blocker_cache[cpv]
2823 blocker_cache.flush()
2826 # Discard any "uninstall" tasks scheduled by previous calls
2827 # to this method, since those tasks may not make sense given
2828 # the current graph state.
2829 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
2830 if previous_uninstall_tasks:
2831 self._dynamic_config._blocker_uninstalls = digraph()
2832 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each recorded blocker against the initial
# (installed) and final (post-merge) package sets.
2834 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
2835 self._spinner_update()
2836 root_config = self._frozen_config.roots[blocker.root]
2837 virtuals = root_config.settings.getvirtuals()
2838 myroot = blocker.root
2839 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
2840 final_db = self._dynamic_config.mydbapi[myroot]
2842 provider_virtual = False
2843 if blocker.cp in virtuals and \
2844 not self._have_new_virt(blocker.root, blocker.cp):
2845 provider_virtual = True
2847 # Use this to check PROVIDE for each matched package
2849 atom_set = InternalPackageSet(
2850 initial_atoms=[blocker.atom])
# Expand an old-style virtual blocker to one atom per provider.
2852 if provider_virtual:
2854 for provider_entry in virtuals[blocker.cp]:
2856 portage.dep_getkey(provider_entry)
2857 atoms.append(Atom(blocker.atom.replace(
2858 blocker.cp, provider_cp)))
2860 atoms = [blocker.atom]
2862 blocked_initial = set()
2864 for pkg in initial_db.match_pkgs(atom):
2865 if atom_set.findAtomForPackage(pkg):
2866 blocked_initial.add(pkg)
2868 blocked_final = set()
2870 for pkg in final_db.match_pkgs(atom):
2871 if atom_set.findAtomForPackage(pkg):
2872 blocked_final.add(pkg)
# Nothing blocked at all: the blocker is irrelevant; prune it.
2874 if not blocked_initial and not blocked_final:
2875 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
2876 self._dynamic_config._blocker_parents.remove(blocker)
2877 # Discard any parents that don't have any more blockers.
2878 for pkg in parent_pkgs:
2879 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
2880 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
2881 self._dynamic_config._blocker_parents.remove(pkg)
2883 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
2884 unresolved_blocks = False
2885 depends_on_order = set()
2886 for pkg in blocked_initial:
2887 if pkg.slot_atom == parent.slot_atom and \
2888 not blocker.atom.blocker.overlap.forbid:
2889 # New !!atom blockers do not allow temporary
2890 # simulaneous installation, so unlike !atom
2891 # blockers, !!atom blockers aren't ignored
2892 # when they match other packages occupying
2895 if parent.installed:
2896 # Two currently installed packages conflict with
2897 # eachother. Ignore this case since the damage
2898 # is already done and this would be likely to
2899 # confuse users if displayed like a normal blocker.
2902 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
2904 if parent.operation == "merge":
2905 # Maybe the blocked package can be replaced or simply
2906 # unmerged to resolve this block.
2907 depends_on_order.add((pkg, parent))
2909 # None of the above blocker resolutions techniques apply,
2910 # so apparently this one is unresolvable.
2911 unresolved_blocks = True
2912 for pkg in blocked_final:
2913 if pkg.slot_atom == parent.slot_atom and \
2914 not blocker.atom.blocker.overlap.forbid:
2915 # New !!atom blockers do not allow temporary
2916 # simulaneous installation, so unlike !atom
2917 # blockers, !!atom blockers aren't ignored
2918 # when they match other packages occupying
2921 if parent.operation == "nomerge" and \
2922 pkg.operation == "nomerge":
2923 # This blocker will be handled the next time that a
2924 # merge of either package is triggered.
2927 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
2929 # Maybe the blocking package can be
2930 # unmerged to resolve this block.
2931 if parent.operation == "merge" and pkg.installed:
2932 depends_on_order.add((pkg, parent))
2934 elif parent.operation == "nomerge":
2935 depends_on_order.add((parent, pkg))
2937 # None of the above blocker resolutions techniques apply,
2938 # so apparently this one is unresolvable.
2939 unresolved_blocks = True
2941 # Make sure we don't unmerge any package that have been pulled
2943 if not unresolved_blocks and depends_on_order:
2944 for inst_pkg, inst_task in depends_on_order:
2945 if self._dynamic_config.digraph.contains(inst_pkg) and \
2946 self._dynamic_config.digraph.parent_nodes(inst_pkg):
2947 unresolved_blocks = True
2950 if not unresolved_blocks and depends_on_order:
2951 for inst_pkg, inst_task in depends_on_order:
# Synthesize an "uninstall" task mirroring the installed
# package, ordered before the dependent merge task.
2952 uninst_task = Package(built=inst_pkg.built,
2953 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
2954 metadata=inst_pkg.metadata,
2955 operation="uninstall",
2956 root_config=inst_pkg.root_config,
2957 type_name=inst_pkg.type_name)
2958 # Enforce correct merge order with a hard dep.
2959 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
2960 priority=BlockerDepPriority.instance)
2961 # Count references to this blocker so that it can be
2962 # invalidated after nodes referencing it have been
2964 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
2965 if not unresolved_blocks and not depends_on_order:
2966 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
2967 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
2968 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
2969 self._dynamic_config._blocker_parents.remove(blocker)
2970 if not self._dynamic_config._blocker_parents.child_nodes(parent):
2971 self._dynamic_config._blocker_parents.remove(parent)
2972 if unresolved_blocks:
2973 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
2977 def _accept_blocker_conflicts(self):
2979 for x in ("--buildpkgonly", "--fetchonly",
2980 "--fetch-all-uri", "--nodeps"):
2981 if x in self._frozen_config.myopts:
2986 def _merge_order_bias(self, mygraph):
2988 For optimal leaf node selection, promote deep system runtime deps and
2989 order nodes from highest to lowest overall reference count.
2993 for node in mygraph.order:
2994 node_info[node] = len(mygraph.parent_nodes(node))
2995 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
2997 def cmp_merge_preference(node1, node2):
2999 if node1.operation == 'uninstall':
3000 if node2.operation == 'uninstall':
3004 if node2.operation == 'uninstall':
3005 if node1.operation == 'uninstall':
3009 node1_sys = node1 in deep_system_deps
3010 node2_sys = node2 in deep_system_deps
3011 if node1_sys != node2_sys:
3016 return node_info[node2] - node_info[node1]
3018 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
3020 def altlist(self, reversed=False):
3022 while self._dynamic_config._serialized_tasks_cache is None:
3023 self._resolve_conflicts()
3025 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
3026 self._serialize_tasks()
3027 except self._serialize_tasks_retry:
3030 retlist = self._dynamic_config._serialized_tasks_cache[:]
3035 def schedulerGraph(self):
3037 The scheduler graph is identical to the normal one except that
3038 uninstall edges are reversed in specific cases that require
3039 conflicting packages to be temporarily installed simultaneously.
3040 This is intended for use by the Scheduler in its parallelization
3041 logic. It ensures that temporary simultaneous installation of
3042 conflicting packages is avoided when appropriate (especially for
3043 !!atom blockers), but allowed in specific cases that require it.
3045 Note that this method calls break_refs() which alters the state of
3046 internal Package instances such that this depgraph instance should
3047 not be used to perform any more calculations.
3049 if self._dynamic_config._scheduler_graph is None:
3051 self.break_refs(self._dynamic_config._scheduler_graph.order)
3052 return self._dynamic_config._scheduler_graph
3054 def break_refs(self, nodes):
3056 Take a mergelist like that returned from self.altlist() and
3057 break any references that lead back to the depgraph. This is
3058 useful if you want to hold references to packages without
3059 also holding the depgraph on the heap.
3062 if hasattr(node, "root_config"):
3063 # The FakeVartree references the _package_cache which
3064 # references the depgraph. So that Package instances don't
3065 # hold the depgraph and FakeVartree on the heap, replace
3066 # the RootConfig that references the FakeVartree with the
3067 # original RootConfig instance which references the actual
3069 node.root_config = \
3070 self._frozen_config._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """Finalize the dependency graph and resolve outstanding conflicts.

    Completes the graph and validates its blockers, raising
    _unknown_internal_error() if either step fails, then processes any
    slot collisions that were recorded during graph construction.
    """
    # Both steps must succeed; validation is only attempted after the
    # graph has been successfully completed (short-circuit preserves
    # the original evaluation order).
    if not (self._complete_graph() and self._validate_blockers()):
        raise self._unknown_internal_error()

    if self._dynamic_config._slot_collision_info:
        self._process_slot_conflicts()
3082 def _serialize_tasks(self):
3084 if "--debug" in self._frozen_config.myopts:
3085 writemsg("\ndigraph:\n\n", noiselevel=-1)
3086 self._dynamic_config.digraph.debug_print()
3087 writemsg("\n", noiselevel=-1)
3089 scheduler_graph = self._dynamic_config.digraph.copy()
3091 if '--nodeps' in self._frozen_config.myopts:
3092 # Preserve the package order given on the command line.
3093 return ([node for node in scheduler_graph \
3094 if isinstance(node, Package) \
3095 and node.operation == 'merge'], scheduler_graph)
3097 mygraph=self._dynamic_config.digraph.copy()
3098 # Prune "nomerge" root nodes if nothing depends on them, since
3099 # otherwise they slow down merge order calculation. Don't remove
3100 # non-root nodes since they help optimize merge order in some cases
3101 # such as revdep-rebuild.
3102 removed_nodes = set()
3104 for node in mygraph.root_nodes():
3105 if not isinstance(node, Package) or \
3106 node.installed or node.onlydeps:
3107 removed_nodes.add(node)
3109 self._spinner_update()
3110 mygraph.difference_update(removed_nodes)
3111 if not removed_nodes:
3113 removed_nodes.clear()
3114 self._merge_order_bias(mygraph)
3115 def cmp_circular_bias(n1, n2):
3117 RDEPEND is stronger than PDEPEND and this function
3118 measures such a strength bias within a circular
3119 dependency relationship.
3121 n1_n2_medium = n2 in mygraph.child_nodes(n1,
3122 ignore_priority=priority_range.ignore_medium_soft)
3123 n2_n1_medium = n1 in mygraph.child_nodes(n2,
3124 ignore_priority=priority_range.ignore_medium_soft)
3125 if n1_n2_medium == n2_n1_medium:
3130 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
3132 # Contains uninstall tasks that have been scheduled to
3133 # occur after overlapping blockers have been installed.
3134 scheduled_uninstalls = set()
3135 # Contains any Uninstall tasks that have been ignored
3136 # in order to avoid the circular deps code path. These
3137 # correspond to blocker conflicts that could not be
3139 ignored_uninstall_tasks = set()
3140 have_uninstall_task = False
3141 complete = "complete" in self._dynamic_config.myparams
3144 def get_nodes(**kwargs):
3146 Returns leaf nodes excluding Uninstall instances
3147 since those should be executed as late as possible.
3149 return [node for node in mygraph.leaf_nodes(**kwargs) \
3150 if isinstance(node, Package) and \
3151 (node.operation != "uninstall" or \
3152 node in scheduled_uninstalls)]
3154 # sys-apps/portage needs special treatment if ROOT="/"
3155 running_root = self._frozen_config._running_root.root
3156 from portage.const import PORTAGE_PACKAGE_ATOM
3157 runtime_deps = InternalPackageSet(
3158 initial_atoms=[PORTAGE_PACKAGE_ATOM])
3159 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
3160 PORTAGE_PACKAGE_ATOM)
3161 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
3162 PORTAGE_PACKAGE_ATOM)
3165 running_portage = running_portage[0]
3167 running_portage = None
3169 if replacement_portage:
3170 replacement_portage = replacement_portage[0]
3172 replacement_portage = None
3174 if replacement_portage == running_portage:
3175 replacement_portage = None
3177 if replacement_portage is not None:
3178 # update from running_portage to replacement_portage asap
3179 asap_nodes.append(replacement_portage)
3181 if running_portage is not None:
3183 portage_rdepend = self._select_atoms_highest_available(
3184 running_root, running_portage.metadata["RDEPEND"],
3185 myuse=running_portage.use.enabled,
3186 parent=running_portage, strict=False)
3187 except portage.exception.InvalidDependString as e:
3188 portage.writemsg("!!! Invalid RDEPEND in " + \
3189 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3190 (running_root, running_portage.cpv, e), noiselevel=-1)
3192 portage_rdepend = {running_portage : []}
3193 for atoms in portage_rdepend.values():
3194 runtime_deps.update(atom for atom in atoms \
3195 if not atom.blocker)
3197 def gather_deps(ignore_priority, mergeable_nodes,
3198 selected_nodes, node):
3200 Recursively gather a group of nodes that RDEPEND on
3201 each other. This ensures that they are merged as a group
3202 and get their RDEPENDs satisfied as soon as possible.
3204 if node in selected_nodes:
3206 if node not in mergeable_nodes:
3208 if node == replacement_portage and \
3209 mygraph.child_nodes(node,
3210 ignore_priority=priority_range.ignore_medium_soft):
3211 # Make sure that portage always has all of it's
3212 # RDEPENDs installed first.
3214 selected_nodes.add(node)
3215 for child in mygraph.child_nodes(node,
3216 ignore_priority=ignore_priority):
3217 if not gather_deps(ignore_priority,
3218 mergeable_nodes, selected_nodes, child):
3222 def ignore_uninst_or_med(priority):
3223 if priority is BlockerDepPriority.instance:
3225 return priority_range.ignore_medium(priority)
3227 def ignore_uninst_or_med_soft(priority):
3228 if priority is BlockerDepPriority.instance:
3230 return priority_range.ignore_medium_soft(priority)
3232 tree_mode = "--tree" in self._frozen_config.myopts
3233 # Tracks whether or not the current iteration should prefer asap_nodes
3234 # if available. This is set to False when the previous iteration
3235 # failed to select any nodes. It is reset whenever nodes are
3236 # successfully selected.
3239 # Controls whether or not the current iteration should drop edges that
3240 # are "satisfied" by installed packages, in order to solve circular
3241 # dependencies. The deep runtime dependencies of installed packages are
3242 # not checked in this case (bug #199856), so it must be avoided
3243 # whenever possible.
3244 drop_satisfied = False
3246 # State of variables for successive iterations that loosen the
3247 # criteria for node selection.
3249 # iteration prefer_asap drop_satisfied
3254 # If no nodes are selected on the last iteration, it is due to
3255 # unresolved blockers or circular dependencies.
3257 while not mygraph.empty():
3258 self._spinner_update()
3259 selected_nodes = None
3260 ignore_priority = None
3261 if drop_satisfied or (prefer_asap and asap_nodes):
3262 priority_range = DepPrioritySatisfiedRange
3264 priority_range = DepPriorityNormalRange
3265 if prefer_asap and asap_nodes:
3266 # ASAP nodes are merged before their soft deps. Go ahead and
3267 # select root nodes here if necessary, since it's typical for
3268 # the parent to have been removed from the graph already.
3269 asap_nodes = [node for node in asap_nodes \
3270 if mygraph.contains(node)]
3271 for node in asap_nodes:
3272 if not mygraph.child_nodes(node,
3273 ignore_priority=priority_range.ignore_soft):
3274 selected_nodes = [node]
3275 asap_nodes.remove(node)
3277 if not selected_nodes and \
3278 not (prefer_asap and asap_nodes):
3279 for i in range(priority_range.NONE,
3280 priority_range.MEDIUM_SOFT + 1):
3281 ignore_priority = priority_range.ignore_priority[i]
3282 nodes = get_nodes(ignore_priority=ignore_priority)
3284 # If there is a mixture of merges and uninstalls,
3285 # do the uninstalls first.
3287 good_uninstalls = []
3289 if node.operation == "uninstall":
3290 good_uninstalls.append(node)
3293 nodes = good_uninstalls
3297 if ignore_priority is None and not tree_mode:
3298 # Greedily pop all of these nodes since no
3299 # relationship has been ignored. This optimization
3300 # destroys --tree output, so it's disabled in tree
3302 selected_nodes = nodes
3304 # For optimal merge order:
3305 # * Only pop one node.
3306 # * Removing a root node (node without a parent)
3307 # will not produce a leaf node, so avoid it.
3308 # * It's normal for a selected uninstall to be a
3309 # root node, so don't check them for parents.
3311 if node.operation == "uninstall" or \
3312 mygraph.parent_nodes(node):
3313 selected_nodes = [node]
3319 if not selected_nodes:
3320 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
3322 mergeable_nodes = set(nodes)
3323 if prefer_asap and asap_nodes:
3325 for i in range(priority_range.SOFT,
3326 priority_range.MEDIUM_SOFT + 1):
3327 ignore_priority = priority_range.ignore_priority[i]
3329 if not mygraph.parent_nodes(node):
3331 selected_nodes = set()
3332 if gather_deps(ignore_priority,
3333 mergeable_nodes, selected_nodes, node):
3336 selected_nodes = None
3340 if prefer_asap and asap_nodes and not selected_nodes:
3341 # We failed to find any asap nodes to merge, so ignore
3342 # them for the next iteration.
3346 if selected_nodes and ignore_priority is not None:
3347 # Try to merge ignored medium_soft deps as soon as possible
3348 # if they're not satisfied by installed packages.
3349 for node in selected_nodes:
3350 children = set(mygraph.child_nodes(node))
3351 soft = children.difference(
3352 mygraph.child_nodes(node,
3353 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
3354 medium_soft = children.difference(
3355 mygraph.child_nodes(node,
3357 DepPrioritySatisfiedRange.ignore_medium_soft))
3358 medium_soft.difference_update(soft)
3359 for child in medium_soft:
3360 if child in selected_nodes:
3362 if child in asap_nodes:
3364 asap_nodes.append(child)
3366 if selected_nodes and len(selected_nodes) > 1:
3367 if not isinstance(selected_nodes, list):
3368 selected_nodes = list(selected_nodes)
3369 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
3371 if not selected_nodes and not myblocker_uninstalls.is_empty():
3372 # An Uninstall task needs to be executed in order to
3373 # avoid conflict if possible.
3376 priority_range = DepPrioritySatisfiedRange
3378 priority_range = DepPriorityNormalRange
3380 mergeable_nodes = get_nodes(
3381 ignore_priority=ignore_uninst_or_med)
3383 min_parent_deps = None
3385 for task in myblocker_uninstalls.leaf_nodes():
3386 # Do some sanity checks so that system or world packages
3387 # don't get uninstalled inappropriately here (only really
3388 # necessary when --complete-graph has not been enabled).
3390 if task in ignored_uninstall_tasks:
3393 if task in scheduled_uninstalls:
3394 # It's been scheduled but it hasn't
3395 # been executed yet due to dependence
3396 # on installation of blocking packages.
3399 root_config = self._frozen_config.roots[task.root]
3400 inst_pkg = self._pkg(task.cpv, "installed", root_config,
3403 if self._dynamic_config.digraph.contains(inst_pkg):
3406 forbid_overlap = False
3407 heuristic_overlap = False
3408 for blocker in myblocker_uninstalls.parent_nodes(task):
3409 if blocker.eapi in ("0", "1"):
3410 heuristic_overlap = True
3411 elif blocker.atom.blocker.overlap.forbid:
3412 forbid_overlap = True
3414 if forbid_overlap and running_root == task.root:
3417 if heuristic_overlap and running_root == task.root:
3418 # Never uninstall sys-apps/portage or its essential
3419 # dependencies, except through replacement.
3421 runtime_dep_atoms = \
3422 list(runtime_deps.iterAtomsForPackage(task))
3423 except portage.exception.InvalidDependString as e:
3424 portage.writemsg("!!! Invalid PROVIDE in " + \
3425 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3426 (task.root, task.cpv, e), noiselevel=-1)
3430 # Don't uninstall a runtime dep if it appears
3431 # to be the only suitable one installed.
3433 vardb = root_config.trees["vartree"].dbapi
3434 for atom in runtime_dep_atoms:
3435 other_version = None
3436 for pkg in vardb.match_pkgs(atom):
3437 if pkg.cpv == task.cpv and \
3438 pkg.metadata["COUNTER"] == \
3439 task.metadata["COUNTER"]:
3443 if other_version is None:
3449 # For packages in the system set, don't take
3450 # any chances. If the conflict can't be resolved
3451 # by a normal replacement operation then abort.
3454 for atom in root_config.sets[
3455 "system"].iterAtomsForPackage(task):
3458 except portage.exception.InvalidDependString as e:
3459 portage.writemsg("!!! Invalid PROVIDE in " + \
3460 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3461 (task.root, task.cpv, e), noiselevel=-1)
3467 # Note that the world check isn't always
3468 # necessary since self._complete_graph() will
3469 # add all packages from the system and world sets to the
3470 # graph. This just allows unresolved conflicts to be
3471 # detected as early as possible, which makes it possible
3472 # to avoid calling self._complete_graph() when it is
3473 # unnecessary due to blockers triggering an abortion.
3475 # For packages in the world set, go ahead and uninstall
3476 # when necessary, as long as the atom will be satisfied
3477 # in the final state.
3478 graph_db = self._dynamic_config.mydbapi[task.root]
3481 for atom in root_config.sets[
3482 "world"].iterAtomsForPackage(task):
3484 for pkg in graph_db.match_pkgs(atom):
3491 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
3493 except portage.exception.InvalidDependString as e:
3494 portage.writemsg("!!! Invalid PROVIDE in " + \
3495 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3496 (task.root, task.cpv, e), noiselevel=-1)
3502 # Check the deps of parent nodes to ensure that
3503 # the chosen task produces a leaf node. Maybe
3504 # this can be optimized some more to make the
3505 # best possible choice, but the current algorithm
3506 # is simple and should be near optimal for most
3508 mergeable_parent = False
3510 for parent in mygraph.parent_nodes(task):
3511 parent_deps.update(mygraph.child_nodes(parent,
3512 ignore_priority=priority_range.ignore_medium_soft))
3513 if parent in mergeable_nodes and \
3514 gather_deps(ignore_uninst_or_med_soft,
3515 mergeable_nodes, set(), parent):
3516 mergeable_parent = True
3518 if not mergeable_parent:
3521 parent_deps.remove(task)
3522 if min_parent_deps is None or \
3523 len(parent_deps) < min_parent_deps:
3524 min_parent_deps = len(parent_deps)
3527 if uninst_task is not None:
3528 # The uninstall is performed only after blocking
3529 # packages have been merged on top of it. File
3530 # collisions between blocking packages are detected
3531 # and removed from the list of files to be uninstalled.
3532 scheduled_uninstalls.add(uninst_task)
3533 parent_nodes = mygraph.parent_nodes(uninst_task)
3535 # Reverse the parent -> uninstall edges since we want
3536 # to do the uninstall after blocking packages have
3537 # been merged on top of it.
3538 mygraph.remove(uninst_task)
3539 for blocked_pkg in parent_nodes:
3540 mygraph.add(blocked_pkg, uninst_task,
3541 priority=BlockerDepPriority.instance)
3542 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
3543 scheduler_graph.add(blocked_pkg, uninst_task,
3544 priority=BlockerDepPriority.instance)
3546 # Sometimes a merge node will render an uninstall
3547 # node unnecessary (due to occupying the same SLOT),
3548 # and we want to avoid executing a separate uninstall
3549 # task in that case.
3550 slot_node = self._dynamic_config.mydbapi[uninst_task.root
3551 ].match_pkgs(uninst_task.slot_atom)
3553 slot_node[0].operation == "merge":
3554 mygraph.add(slot_node[0], uninst_task,
3555 priority=BlockerDepPriority.instance)
3557 # Reset the state variables for leaf node selection and
3558 # continue trying to select leaf nodes.
3560 drop_satisfied = False
3563 if not selected_nodes:
3564 # Only select root nodes as a last resort. This case should
3565 # only trigger when the graph is nearly empty and the only
3566 # remaining nodes are isolated (no parents or children). Since
3567 # the nodes must be isolated, ignore_priority is not needed.
3568 selected_nodes = get_nodes()
3570 if not selected_nodes and not drop_satisfied:
3571 drop_satisfied = True
3574 if not selected_nodes and not myblocker_uninstalls.is_empty():
3575 # If possible, drop an uninstall task here in order to avoid
3576 # the circular deps code path. The corresponding blocker will
3577 # still be counted as an unresolved conflict.
3579 for node in myblocker_uninstalls.leaf_nodes():
3581 mygraph.remove(node)
3586 ignored_uninstall_tasks.add(node)
3589 if uninst_task is not None:
3590 # Reset the state variables for leaf node selection and
3591 # continue trying to select leaf nodes.
3593 drop_satisfied = False
3596 if not selected_nodes:
3597 self._dynamic_config._circular_deps_for_display = mygraph
3598 raise self._unknown_internal_error()
3600 # At this point, we've succeeded in selecting one or more nodes, so
3601 # reset state variables for leaf node selection.
3603 drop_satisfied = False
3605 mygraph.difference_update(selected_nodes)
3607 for node in selected_nodes:
3608 if isinstance(node, Package) and \
3609 node.operation == "nomerge":
3612 # Handle interactions between blockers
3613 # and uninstallation tasks.
3614 solved_blockers = set()
3616 if isinstance(node, Package) and \
3617 "uninstall" == node.operation:
3618 have_uninstall_task = True
3621 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
3622 previous_cpv = vardb.match(node.slot_atom)
3624 # The package will be replaced by this one, so remove
3625 # the corresponding Uninstall task if necessary.
3626 previous_cpv = previous_cpv[0]
3628 ("installed", node.root, previous_cpv, "uninstall")
3630 mygraph.remove(uninst_task)
3634 if uninst_task is not None and \
3635 uninst_task not in ignored_uninstall_tasks and \
3636 myblocker_uninstalls.contains(uninst_task):
3637 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
3638 myblocker_uninstalls.remove(uninst_task)
3639 # Discard any blockers that this Uninstall solves.
3640 for blocker in blocker_nodes:
3641 if not myblocker_uninstalls.child_nodes(blocker):
3642 myblocker_uninstalls.remove(blocker)
3643 solved_blockers.add(blocker)
3645 retlist.append(node)
3647 if (isinstance(node, Package) and \
3648 "uninstall" == node.operation) or \
3649 (uninst_task is not None and \
3650 uninst_task in scheduled_uninstalls):
3651 # Include satisfied blockers in the merge list
3652 # since the user might be interested and also
3653 # it serves as an indicator that blocking packages
3654 # will be temporarily installed simultaneously.
3655 for blocker in solved_blockers:
3656 retlist.append(Blocker(atom=blocker.atom,
3657 root=blocker.root, eapi=blocker.eapi,
3660 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
3661 for node in myblocker_uninstalls.root_nodes():
3662 unsolvable_blockers.add(node)
3664 for blocker in unsolvable_blockers:
3665 retlist.append(blocker)
3667 # If any Uninstall tasks need to be executed in order
3668 # to avoid a conflict, complete the graph with any
3669 # dependencies that may have been initially
3670 # neglected (to ensure that unsafe Uninstall tasks
3671 # are properly identified and blocked from execution).
3672 if have_uninstall_task and \
3674 not unsolvable_blockers:
3675 self._dynamic_config.myparams["complete"] = True
3676 raise self._serialize_tasks_retry("")
3678 if unsolvable_blockers and \
3679 not self._accept_blocker_conflicts():
3680 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
3681 self._dynamic_config._serialized_tasks_cache = retlist[:]
3682 self._dynamic_config._scheduler_graph = scheduler_graph
3683 raise self._unknown_internal_error()
3685 if self._dynamic_config._slot_collision_info and \
3686 not self._accept_blocker_conflicts():
3687 self._dynamic_config._serialized_tasks_cache = retlist[:]
3688 self._dynamic_config._scheduler_graph = scheduler_graph
3689 raise self._unknown_internal_error()
3691 return retlist, scheduler_graph
3693 def _show_circular_deps(self, mygraph):
3694 # No leaf nodes are available, so we have a circular
3695 # dependency panic situation. Reduce the noise level to a
3696 # minimum via repeated elimination of root nodes since they
3697 # have no parents and thus can not be part of a cycle.
3699 root_nodes = mygraph.root_nodes(
3700 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
3703 mygraph.difference_update(root_nodes)
3704 # Display the USE flags that are enabled on nodes that are part
3705 # of dependency cycles in case that helps the user decide to
3706 # disable some of them.
3708 tempgraph = mygraph.copy()
3709 while not tempgraph.empty():
3710 nodes = tempgraph.leaf_nodes()
3712 node = tempgraph.order[0]
3715 display_order.append(node)
3716 tempgraph.remove(node)
3717 display_order.reverse()
3718 self._frozen_config.myopts.pop("--quiet", None)
3719 self._frozen_config.myopts.pop("--verbose", None)
3720 self._frozen_config.myopts["--tree"] = True
3721 portage.writemsg("\n\n", noiselevel=-1)
3722 self.display(display_order)
3723 prefix = colorize("BAD", " * ")
3724 portage.writemsg("\n", noiselevel=-1)
3725 portage.writemsg(prefix + "Error: circular dependencies:\n",
3727 portage.writemsg("\n", noiselevel=-1)
3728 mygraph.debug_print()
3729 portage.writemsg("\n", noiselevel=-1)
3730 portage.writemsg(prefix + "Note that circular dependencies " + \
3731 "can often be avoided by temporarily\n", noiselevel=-1)
3732 portage.writemsg(prefix + "disabling USE flags that trigger " + \
3733 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached serialized task list unless already shown.

    Nothing is displayed when there is no cached list, or when the
    previously displayed list matches the cache either in order or
    reversed.  With --tree in effect the list is shown reversed.
    """
    tasks = self._dynamic_config._serialized_tasks_cache
    if tasks is None:
        return

    shown = self._dynamic_config._displayed_list
    if shown and (shown == tasks or shown == list(reversed(tasks))):
        # This exact merge list (in either orientation) has already
        # been displayed; avoid repeating it.
        return

    display_list = tasks[:]
    if "--tree" in self._frozen_config.myopts:
        display_list.reverse()
    self.display(display_list)
3746 def _show_unsatisfied_blockers(self, blockers):
3747 self._show_merge_list()
3748 msg = "Error: The above package list contains " + \
3749 "packages which cannot be installed " + \
3750 "at the same time on the same system."
3751 prefix = colorize("BAD", " * ")
3752 from textwrap import wrap
3753 portage.writemsg("\n", noiselevel=-1)
3754 for line in wrap(msg, 70):
3755 portage.writemsg(prefix + line + "\n", noiselevel=-1)
3757 # Display the conflicting packages along with the packages
3758 # that pulled them in. This is helpful for troubleshooting
3759 # cases in which blockers don't solve automatically and
3760 # the reasons are not apparent from the normal merge list
3764 for blocker in blockers:
3765 for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
3766 self._dynamic_config._blocker_parents.parent_nodes(blocker)):
3767 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3768 if not parent_atoms:
3769 atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
3770 if atom is not None:
3771 parent_atoms = set([("@world", atom)])
3773 conflict_pkgs[pkg] = parent_atoms
3776 # Reduce noise by pruning packages that are only
3777 # pulled in by other conflict packages.
3779 for pkg, parent_atoms in conflict_pkgs.items():
3780 relevant_parent = False
3781 for parent, atom in parent_atoms:
3782 if parent not in conflict_pkgs:
3783 relevant_parent = True
3785 if not relevant_parent:
3786 pruned_pkgs.add(pkg)
3787 for pkg in pruned_pkgs:
3788 del conflict_pkgs[pkg]
3794 # Max number of parents shown, to avoid flooding the display.
3796 for pkg, parent_atoms in conflict_pkgs.items():
3800 # Prefer packages that are not directly involved in a conflict.
3801 for parent_atom in parent_atoms:
3802 if len(pruned_list) >= max_parents:
3804 parent, atom = parent_atom
3805 if parent not in conflict_pkgs:
3806 pruned_list.add(parent_atom)
3808 for parent_atom in parent_atoms:
3809 if len(pruned_list) >= max_parents:
3811 pruned_list.add(parent_atom)
3813 omitted_parents = len(parent_atoms) - len(pruned_list)
3814 msg.append(indent + "%s pulled in by\n" % pkg)
3816 for parent_atom in pruned_list:
3817 parent, atom = parent_atom
3818 msg.append(2*indent)
3819 if isinstance(parent,
3820 (PackageArg, AtomArg)):
3821 # For PackageArg and AtomArg types, it's
3822 # redundant to display the atom attribute.
3823 msg.append(str(parent))
3825 # Display the specific atom from SetArg or
3827 msg.append("%s required by %s" % (atom, parent))
3831 msg.append(2*indent)
3832 msg.append("(and %d more)\n" % omitted_parents)
3836 sys.stderr.write("".join(msg))
3839 if "--quiet" not in self._frozen_config.myopts:
3840 show_blocker_docs_link()
3842 def display(self, mylist, favorites=[], verbosity=None):
3844 # This is used to prevent display_problems() from
3845 # redundantly displaying this exact same merge list
3846 # again via _show_merge_list().
3847 self._dynamic_config._displayed_list = mylist
3849 if verbosity is None:
3850 verbosity = ("--quiet" in self._frozen_config.myopts and 1 or \
3851 "--verbose" in self._frozen_config.myopts and 3 or 2)
3852 favorites_set = InternalPackageSet(favorites)
3853 oneshot = "--oneshot" in self._frozen_config.myopts or \
3854 "--onlydeps" in self._frozen_config.myopts
3855 columns = "--columns" in self._frozen_config.myopts
3860 counters = PackageCounters()
3862 if verbosity == 1 and "--verbose" not in self._frozen_config.myopts:
3863 def create_use_string(*args):
3866 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3868 is_new, reinst_flags,
3869 all_flags=(verbosity == 3 or "--quiet" in self._frozen_config.myopts),
3870 alphabetical=("--alphabetical" in self._frozen_config.myopts)):
3878 cur_iuse = set(cur_iuse)
3879 enabled_flags = cur_iuse.intersection(cur_use)
3880 removed_iuse = set(old_iuse).difference(cur_iuse)
3881 any_iuse = cur_iuse.union(old_iuse)
3882 any_iuse = list(any_iuse)
3884 for flag in any_iuse:
3887 reinst_flag = reinst_flags and flag in reinst_flags
3888 if flag in enabled_flags:
3890 if is_new or flag in old_use and \
3891 (all_flags or reinst_flag):
3892 flag_str = red(flag)
3893 elif flag not in old_iuse:
3894 flag_str = yellow(flag) + "%*"
3895 elif flag not in old_use:
3896 flag_str = green(flag) + "*"
3897 elif flag in removed_iuse:
3898 if all_flags or reinst_flag:
3899 flag_str = yellow("-" + flag) + "%"
3902 flag_str = "(" + flag_str + ")"
3903 removed.append(flag_str)
3906 if is_new or flag in old_iuse and \
3907 flag not in old_use and \
3908 (all_flags or reinst_flag):
3909 flag_str = blue("-" + flag)
3910 elif flag not in old_iuse:
3911 flag_str = yellow("-" + flag)
3912 if flag not in iuse_forced:
3914 elif flag in old_use:
3915 flag_str = green("-" + flag) + "*"
3917 if flag in iuse_forced:
3918 flag_str = "(" + flag_str + ")"
3920 enabled.append(flag_str)
3922 disabled.append(flag_str)
3925 ret = " ".join(enabled)
3927 ret = " ".join(enabled + disabled + removed)
3929 ret = '%s="%s" ' % (name, ret)
3932 repo_display = RepoDisplay(self._frozen_config.roots)
3936 mygraph = self._dynamic_config.digraph.copy()
3938 # If there are any Uninstall instances, add the corresponding
3939 # blockers to the digraph (useful for --tree display).
3941 executed_uninstalls = set(node for node in mylist \
3942 if isinstance(node, Package) and node.operation == "unmerge")
3944 for uninstall in self._dynamic_config._blocker_uninstalls.leaf_nodes():
3945 uninstall_parents = \
3946 self._dynamic_config._blocker_uninstalls.parent_nodes(uninstall)
3947 if not uninstall_parents:
3950 # Remove the corresponding "nomerge" node and substitute
3951 # the Uninstall node.
3952 inst_pkg = self._pkg(uninstall.cpv, "installed",
3953 uninstall.root_config, installed=True)
3956 mygraph.remove(inst_pkg)
3961 inst_pkg_blockers = self._dynamic_config._blocker_parents.child_nodes(inst_pkg)
3963 inst_pkg_blockers = []
3965 # Break the Package -> Uninstall edges.
3966 mygraph.remove(uninstall)
3968 # Resolution of a package's blockers
3969 # depends on its own uninstallation.
3970 for blocker in inst_pkg_blockers:
3971 mygraph.add(uninstall, blocker)
3973 # Expand Package -> Uninstall edges into
3974 # Package -> Blocker -> Uninstall edges.
3975 for blocker in uninstall_parents:
3976 mygraph.add(uninstall, blocker)
3977 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3978 if parent != inst_pkg:
3979 mygraph.add(blocker, parent)
3981 # If the uninstall task did not need to be executed because
3982 # of an upgrade, display Blocker -> Upgrade edges since the
3983 # corresponding Blocker -> Uninstall edges will not be shown.
3985 self._dynamic_config._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
3986 if upgrade_node is not None and \
3987 uninstall not in executed_uninstalls:
3988 for blocker in uninstall_parents:
3989 mygraph.add(upgrade_node, blocker)
3991 unsatisfied_blockers = []
3996 if isinstance(x, Blocker) and not x.satisfied:
3997 unsatisfied_blockers.append(x)
4000 if "--tree" in self._frozen_config.myopts:
4001 depth = len(tree_nodes)
4002 while depth and graph_key not in \
4003 mygraph.child_nodes(tree_nodes[depth-1]):
4006 tree_nodes = tree_nodes[:depth]
4007 tree_nodes.append(graph_key)
4008 display_list.append((x, depth, True))
4009 shown_edges.add((graph_key, tree_nodes[depth-1]))
4011 traversed_nodes = set() # prevent endless circles
4012 traversed_nodes.add(graph_key)
4013 def add_parents(current_node, ordered):
					# Do not traverse to parents if this node is an
					# argument or a direct member of a set that has
4017 # been specified as an argument (system or world).
4018 if current_node not in self._dynamic_config._set_nodes:
4019 parent_nodes = mygraph.parent_nodes(current_node)
4021 child_nodes = set(mygraph.child_nodes(current_node))
4022 selected_parent = None
4023 # First, try to avoid a direct cycle.
4024 for node in parent_nodes:
4025 if not isinstance(node, (Blocker, Package)):
4027 if node not in traversed_nodes and \
4028 node not in child_nodes:
4029 edge = (current_node, node)
4030 if edge in shown_edges:
4032 selected_parent = node
4034 if not selected_parent:
4035 # A direct cycle is unavoidable.
4036 for node in parent_nodes:
4037 if not isinstance(node, (Blocker, Package)):
4039 if node not in traversed_nodes:
4040 edge = (current_node, node)
4041 if edge in shown_edges:
4043 selected_parent = node
4046 shown_edges.add((current_node, selected_parent))
4047 traversed_nodes.add(selected_parent)
4048 add_parents(selected_parent, False)
4049 display_list.append((current_node,
4050 len(tree_nodes), ordered))
4051 tree_nodes.append(current_node)
4053 add_parents(graph_key, True)
4055 display_list.append((x, depth, True))
4056 mylist = display_list
4057 for x in unsatisfied_blockers:
4058 mylist.append((x, 0, True))
4060 last_merge_depth = 0
4061 for i in range(len(mylist)-1,-1,-1):
4062 graph_key, depth, ordered = mylist[i]
4063 if not ordered and depth == 0 and i > 0 \
4064 and graph_key == mylist[i-1][0] and \
4065 mylist[i-1][1] == 0:
4066 # An ordered node got a consecutive duplicate when the tree was
4070 if ordered and graph_key[-1] != "nomerge":
4071 last_merge_depth = depth
4073 if depth >= last_merge_depth or \
4074 i < len(mylist) - 1 and \
4075 depth >= mylist[i+1][1]:
4078 # files to fetch list - avoids counting a same file twice
4079 # in size display (verbose mode)
4082 # Use this set to detect when all the "repoadd" strings are "[0]"
4083 # and disable the entire repo display in this case.
4086 for mylist_index in range(len(mylist)):
4087 x, depth, ordered = mylist[mylist_index]
4091 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4092 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
4093 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4094 vartree = self._frozen_config.trees[myroot]["vartree"]
4095 pkgsettings = self._frozen_config.pkgsettings[myroot]
4098 indent = " " * depth
4100 if isinstance(x, Blocker):
4102 blocker_style = "PKG_BLOCKER_SATISFIED"
4103 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4105 blocker_style = "PKG_BLOCKER"
4106 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4108 counters.blocks += 1
4110 counters.blocks_satisfied += 1
4111 resolved = portage.dep_expand(
4112 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4113 if "--columns" in self._frozen_config.myopts and "--quiet" in self._frozen_config.myopts:
4114 addl += " " + colorize(blocker_style, str(resolved))
4116 addl = "[%s %s] %s%s" % \
4117 (colorize(blocker_style, "blocks"),
4118 addl, indent, colorize(blocker_style, str(resolved)))
4119 block_parents = self._dynamic_config._blocker_parents.parent_nodes(x)
4120 block_parents = set([pnode[2] for pnode in block_parents])
4121 block_parents = ", ".join(block_parents)
4123 addl += colorize(blocker_style,
4124 " (\"%s\" is blocking %s)") % \
4125 (str(x.atom).lstrip("!"), block_parents)
4127 addl += colorize(blocker_style,
4128 " (is blocking %s)") % block_parents
4129 if isinstance(x, Blocker) and x.satisfied:
4134 blockers.append(addl)
4137 pkg_merge = ordered and pkg_status == "merge"
4138 if not pkg_merge and pkg_status == "merge":
4139 pkg_status = "nomerge"
4140 built = pkg_type != "ebuild"
4141 installed = pkg_type == "installed"
4143 metadata = pkg.metadata
4145 repo_name = metadata["repository"]
4146 if pkg.type_name == "ebuild":
4147 ebuild_path = portdb.findname(pkg.cpv)
4148 if ebuild_path is None:
4149 raise AssertionError(
4150 "ebuild not found for '%s'" % pkg.cpv)
4151 repo_path_real = os.path.dirname(os.path.dirname(
4152 os.path.dirname(ebuild_path)))
4154 repo_path_real = portdb.getRepositoryPath(repo_name)
4155 pkg_use = list(pkg.use.enabled)
4156 if not pkg.built and pkg.operation == 'merge' and \
4157 'fetch' in pkg.metadata.restrict:
4160 counters.restrict_fetch += 1
4161 if portdb.fetch_check(pkg_key, pkg_use):
4164 counters.restrict_fetch_satisfied += 1
				# We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
				# param is used for -u, where you still *do* want to see when something is being upgraded.
4170 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4171 if vardb.cpv_exists(pkg_key):
4172 addl=" "+yellow("R")+fetch+" "
4175 counters.reinst += 1
4176 if pkg_type == "binary":
4177 counters.binary += 1
4178 elif pkg_status == "uninstall":
4179 counters.uninst += 1
4180 # filter out old-style virtual matches
4181 elif installed_versions and \
4182 portage.cpv_getkey(installed_versions[0]) == \
4183 portage.cpv_getkey(pkg_key):
4184 myinslotlist = vardb.match(pkg.slot_atom)
4185 # If this is the first install of a new-style virtual, we
4186 # need to filter out old-style virtual matches.
4187 if myinslotlist and \
4188 portage.cpv_getkey(myinslotlist[0]) != \
4189 portage.cpv_getkey(pkg_key):
4192 myoldbest = myinslotlist[:]
4194 if not portage.dep.cpvequal(pkg_key,
4195 portage.best([pkg_key] + myoldbest)):
4197 addl += turquoise("U")+blue("D")
4199 counters.downgrades += 1
4200 if pkg_type == "binary":
4201 counters.binary += 1
4204 addl += turquoise("U") + " "
4206 counters.upgrades += 1
4207 if pkg_type == "binary":
4208 counters.binary += 1
4210 # New slot, mark it new.
4211 addl = " " + green("NS") + fetch + " "
4212 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4214 counters.newslot += 1
4215 if pkg_type == "binary":
4216 counters.binary += 1
4218 if "--changelog" in self._frozen_config.myopts:
4219 inst_matches = vardb.match(pkg.slot_atom)
4221 ebuild_path_cl = ebuild_path
4222 if ebuild_path_cl is None:
4224 ebuild_path_cl = portdb.findname(pkg.cpv)
4226 if ebuild_path_cl is not None:
4227 changelogs.extend(calc_changelog(
4228 ebuild_path_cl, inst_matches[0], pkg.cpv))
4230 addl = " " + green("N") + " " + fetch + " "
4233 if pkg_type == "binary":
4234 counters.binary += 1
4241 forced_flags = set()
4242 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4243 forced_flags.update(pkgsettings.useforce)
4244 forced_flags.update(pkgsettings.usemask)
4246 cur_use = [flag for flag in pkg.use.enabled \
4247 if flag in pkg.iuse.all]
4248 cur_iuse = sorted(pkg.iuse.all)
4250 if myoldbest and myinslotlist:
4251 previous_cpv = myoldbest[0]
4253 previous_cpv = pkg.cpv
4254 if vardb.cpv_exists(previous_cpv):
4255 old_iuse, old_use = vardb.aux_get(
4256 previous_cpv, ["IUSE", "USE"])
4257 old_iuse = list(set(
4258 filter_iuse_defaults(old_iuse.split())))
4260 old_use = old_use.split()
4267 old_use = [flag for flag in old_use if flag in old_iuse]
4269 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4271 use_expand.reverse()
4272 use_expand_hidden = \
4273 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
4275 def map_to_use_expand(myvals, forcedFlags=False,
4279 for exp in use_expand:
4282 for val in myvals[:]:
4283 if val.startswith(exp.lower()+"_"):
4284 if val in forced_flags:
4285 forced[exp].add(val[len(exp)+1:])
4286 ret[exp].append(val[len(exp)+1:])
4289 forced["USE"] = [val for val in myvals \
4290 if val in forced_flags]
4292 for exp in use_expand_hidden:
4298 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4299 # are the only thing that triggered reinstallation.
4300 reinst_flags_map = {}
4301 reinstall_for_flags = self._dynamic_config._reinstall_nodes.get(pkg)
4302 reinst_expand_map = None
4303 if reinstall_for_flags:
4304 reinst_flags_map = map_to_use_expand(
4305 list(reinstall_for_flags), removeHidden=False)
4306 for k in list(reinst_flags_map):
4307 if not reinst_flags_map[k]:
4308 del reinst_flags_map[k]
4309 if not reinst_flags_map.get("USE"):
4310 reinst_expand_map = reinst_flags_map.copy()
4311 reinst_expand_map.pop("USE", None)
4312 if reinst_expand_map and \
4313 not set(reinst_expand_map).difference(
4315 use_expand_hidden = \
4316 set(use_expand_hidden).difference(
4319 cur_iuse_map, iuse_forced = \
4320 map_to_use_expand(cur_iuse, forcedFlags=True)
4321 cur_use_map = map_to_use_expand(cur_use)
4322 old_iuse_map = map_to_use_expand(old_iuse)
4323 old_use_map = map_to_use_expand(old_use)
4326 use_expand.insert(0, "USE")
4328 for key in use_expand:
4329 if key in use_expand_hidden:
4331 verboseadd += create_use_string(key.upper(),
4332 cur_iuse_map[key], iuse_forced[key],
4333 cur_use_map[key], old_iuse_map[key],
4334 old_use_map[key], is_new,
4335 reinst_flags_map.get(key))
4340 if pkg_type == "ebuild" and pkg_merge:
4342 myfilesdict = portdb.getfetchsizes(pkg_key,
4343 useflags=pkg_use, debug=self._frozen_config.edebug)
4344 except portage.exception.InvalidDependString as e:
4345 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4346 show_invalid_depstring_notice(x, src_uri, str(e))
4349 if myfilesdict is None:
4350 myfilesdict="[empty/missing/bad digest]"
4352 for myfetchfile in myfilesdict:
4353 if myfetchfile not in myfetchlist:
4354 mysize+=myfilesdict[myfetchfile]
4355 myfetchlist.append(myfetchfile)
4357 counters.totalsize += mysize
4358 verboseadd += format_size(mysize)
4361 # assign index for a previous version in the same slot
4362 has_previous = False
4363 repo_name_prev = None
4364 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4366 slot_matches = vardb.match(slot_atom)
4369 repo_name_prev = vardb.aux_get(slot_matches[0],
4372 # now use the data to generate output
4373 if pkg.installed or not has_previous:
4374 repoadd = repo_display.repoStr(repo_path_real)
4376 repo_path_prev = None
4378 repo_path_prev = portdb.getRepositoryPath(
4380 if repo_path_prev == repo_path_real:
4381 repoadd = repo_display.repoStr(repo_path_real)
4383 repoadd = "%s=>%s" % (
4384 repo_display.repoStr(repo_path_prev),
4385 repo_display.repoStr(repo_path_real))
4387 repoadd_set.add(repoadd)
4389 xs = [portage.cpv_getkey(pkg_key)] + \
4390 list(portage.catpkgsplit(pkg_key)[2:])
4397 if "COLUMNWIDTH" in self._frozen_config.settings:
4399 mywidth = int(self._frozen_config.settings["COLUMNWIDTH"])
4400 except ValueError as e:
4401 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4403 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4404 self._frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
4406 oldlp = mywidth - 30
4409 # Convert myoldbest from a list to a string.
4413 for pos, key in enumerate(myoldbest):
4414 key = portage.catpkgsplit(key)[2] + \
4415 "-" + portage.catpkgsplit(key)[3]
4416 if key[-3:] == "-r0":
4418 myoldbest[pos] = key
4419 myoldbest = blue("["+", ".join(myoldbest)+"]")
4422 root_config = self._frozen_config.roots[myroot]
4423 system_set = root_config.sets["system"]
4424 world_set = root_config.sets["world"]
4429 pkg_system = system_set.findAtomForPackage(pkg)
4430 pkg_world = world_set.findAtomForPackage(pkg)
4431 if not (oneshot or pkg_world) and \
4432 myroot == self._frozen_config.target_root and \
4433 favorites_set.findAtomForPackage(pkg):
4434 # Maybe it will be added to world now.
4435 if create_world_atom(pkg, favorites_set, root_config):
4437 except portage.exception.InvalidDependString:
4438 # This is reported elsewhere if relevant.
4441 def pkgprint(pkg_str):
4444 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4446 return colorize("PKG_MERGE_WORLD", pkg_str)
4448 return colorize("PKG_MERGE", pkg_str)
4449 elif pkg_status == "uninstall":
4450 return colorize("PKG_UNINSTALL", pkg_str)
4453 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4455 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4457 return colorize("PKG_NOMERGE", pkg_str)
4459 if 'interactive' in pkg.metadata.properties and \
4460 pkg.operation == 'merge':
4461 addl = colorize("WARN", "I") + addl[1:]
4463 counters.interactive += 1
4468 if "--columns" in self._frozen_config.myopts:
4469 if "--quiet" in self._frozen_config.myopts:
4470 myprint=addl+" "+indent+pkgprint(pkg_cp)
4471 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4472 myprint=myprint+myoldbest
4473 myprint=myprint+darkgreen("to "+x[1])
4477 myprint = "[%s] %s%s" % \
4478 (pkgprint(pkg_status.ljust(13)),
4479 indent, pkgprint(pkg.cp))
4481 myprint = "[%s %s] %s%s" % \
4482 (pkgprint(pkg.type_name), addl,
4483 indent, pkgprint(pkg.cp))
4484 if (newlp-nc_len(myprint)) > 0:
4485 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4486 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4487 if (oldlp-nc_len(myprint)) > 0:
4488 myprint=myprint+" "*(oldlp-nc_len(myprint))
4489 myprint=myprint+myoldbest
4490 myprint += darkgreen("to " + pkg.root)
4493 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4495 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4496 myprint += indent + pkgprint(pkg_key) + " " + \
4497 myoldbest + darkgreen("to " + myroot)
4499 if "--columns" in self._frozen_config.myopts:
4500 if "--quiet" in self._frozen_config.myopts:
4501 myprint=addl+" "+indent+pkgprint(pkg_cp)
4502 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4503 myprint=myprint+myoldbest
4507 myprint = "[%s] %s%s" % \
4508 (pkgprint(pkg_status.ljust(13)),
4509 indent, pkgprint(pkg.cp))
4511 myprint = "[%s %s] %s%s" % \
4512 (pkgprint(pkg.type_name), addl,
4513 indent, pkgprint(pkg.cp))
4514 if (newlp-nc_len(myprint)) > 0:
4515 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4516 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4517 if (oldlp-nc_len(myprint)) > 0:
4518 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4519 myprint += myoldbest
4522 myprint = "[%s] %s%s %s" % \
4523 (pkgprint(pkg_status.ljust(13)),
4524 indent, pkgprint(pkg.cpv),
4527 myprint = "[%s %s] %s%s %s" % \
4528 (pkgprint(pkg_type), addl, indent,
4529 pkgprint(pkg.cpv), myoldbest)
4531 if columns and pkg.operation == "uninstall":
4533 p.append((myprint, verboseadd, repoadd))
4535 if "--tree" not in self._frozen_config.myopts and \
4536 "--quiet" not in self._frozen_config.myopts and \
4537 not self._frozen_config._opts_no_restart.intersection(self._frozen_config.myopts) and \
4538 pkg.root == self._frozen_config._running_root.root and \
4539 portage.match_from_list(
4540 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4541 not vardb.cpv_exists(pkg.cpv) and \
4542 "--quiet" not in self._frozen_config.myopts:
4543 if mylist_index < len(mylist) - 1:
4544 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4545 p.append(colorize("WARN", " then resume the merge."))
4548 show_repos = repoadd_set and repoadd_set != set(["0"])
4551 if isinstance(x, basestring):
4552 out.write("%s\n" % (x,))
4555 myprint, verboseadd, repoadd = x
4558 myprint += " " + verboseadd
4560 if show_repos and repoadd:
4561 myprint += " " + teal("[%s]" % repoadd)
4563 out.write("%s\n" % (myprint,))
4572 # In python-2.x, str() can trigger a UnicodeEncodeError here,
4573 # so call __str__() directly.
4574 writemsg_stdout(repo_display.__str__(), noiselevel=-1)
4576 if "--changelog" in self._frozen_config.myopts:
4577 writemsg_stdout('\n', noiselevel=-1)
4578 for revision,text in changelogs:
4579 writemsg_stdout(bold('*'+revision) + '\n' + text,
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		"""

		# Note that show_masked_packages() sends its output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		# NOTE(review): sys.stdout is redirected here but never visibly
		# restored in this excerpt -- confirm that the full source wraps
		# this in try/finally and restores the original stdout.
		sys.stdout = sys.stderr
		self._display_problems()

		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def _display_problems(self):
		# Report resolver problems on stderr: circular deps, unsatisfied
		# blockers, slot conflicts, world-file problems, package.provided
		# conflicts, and masked installed packages.
		# NOTE(review): several flow-control statements (else:/break/
		# variable initializations such as arg_refs and msg) are elided
		# in this excerpt -- confirm control flow against the full source.
		if self._dynamic_config._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._dynamic_config._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._dynamic_config._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._dynamic_config._unsatisfied_blockers_for_display)
		elif self._dynamic_config._slot_collision_info:
			self._show_slot_collision_notice()
			self._show_missed_update()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._dynamic_config._missing_args:
			world_problems = False
			if "world" in self._dynamic_config._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["world"]
				for arg, atom in self._dynamic_config._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True

			# Advise the user to repair the world file when direct world
			# entries could not be satisfied.
			sys.stderr.write("\n!!! Problems have been " + \
				"detected with your world file\n")
			sys.stderr.write("!!! Please run " + \
				green("emaint --check world")+"\n\n")

		if self._dynamic_config._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._dynamic_config._missing_args) + "\n")

		if self._dynamic_config._pprovided_args:
			# Group the offending (arg, atom) pairs by the set(s) that
			# pulled them in, so each one can be reported with its source.
			for arg, atom in self._dynamic_config._pprovided_args:
				if isinstance(arg, SetArg):
					arg_atom = (atom, atom)
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
			msg.append(bad("\nWARNING: "))
			if len(self._dynamic_config._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.items():
				problems_sets.update(refs)
				ref_string = ", ".join(["'%s'" % name for name in refs])
				ref_string = " pulled in by " + ref_string
				msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append(" B) Uninstall offending packages (cleans them from world).\n")
				msg.append(" C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		# Warn about installed packages that are masked under the
		# current configuration.
		masked_packages = []
		for pkg in self._dynamic_config._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n")
			show_masked_packages(masked_packages)
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		# NOTE(review): several flow-control lines (return/continue/try,
		# the world_set.lock() call, and the all_added initialization)
		# are elided in this excerpt -- confirm against the full source.
		# Pretend/fetch-only style modes must not modify the world file.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self._frozen_config.myopts:
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		world_set = root_config.sets["world"]

		world_locked = False
		if hasattr(world_set, "lock"):
		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk
		args_set = self._dynamic_config._sets["args"]
		portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
		added_favorites = set()
		# Only packages that remained "nomerge" are candidates for
		# recording in the world file.
		for x in self._dynamic_config._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
			myfavkey = create_world_atom(x, args_set, root_config)
			if myfavkey in added_favorites:
			added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString as e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
		# Record nested sets (other than args/world) that qualify as
		# world candidates.
		for k in self._dynamic_config._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		print(">>> Recording %s in \"world\" favorites file..." % \
			colorize("INFORM", str(a)))
		world_set.update(all_added)
	def _loadResumeCommand(self, resume_data, skip_masked=True,
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.
		"""
		# NOTE(review): the signature continuation and many flow-control
		# lines (return/continue/try/else and initializations such as
		# masked_tasks, traversed and the mergelist loop header) are
		# elided in this excerpt -- confirm against the full source.

		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		fakedb = self._dynamic_config.mydbapi
		trees = self._frozen_config.trees
		serialized_tasks = []
			# Each resume-list entry must be a 4-element list:
			# [pkg_type, root, cpv, action].
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action != "merge":
			root_config = self._frozen_config.roots[myroot]
				pkg = self._pkg(pkg_key, pkg_type, root_config)
			except portage.exception.PackageNotFound:
				# It does not exist or it is corrupt.
				# TODO: log these somewhere
			# Masked packages are either deferred (masked_tasks) or
			# queued for an "unsatisfied dep" report, depending on
			# skip_masked.
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
					self._dynamic_config._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))
			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self._spinner_update()

		if self._dynamic_config._unsatisfied_deps_for_display:

		if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
			self._dynamic_config._serialized_tasks_cache = serialized_tasks
			self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
			self._select_package = self._select_pkg_from_graph
			self._dynamic_config.myparams["selective"] = True
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of its dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep dependencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self._dynamic_config.myparams["deep"] = True

			favorites = resume_data.get("favorites")
			args_set = self._dynamic_config._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			if not self._create_graph(allow_unsatisfied=True):

			unsatisfied_deps = []
			for dep in self._dynamic_config._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
					node = dep_stack.pop()
					if not isinstance(node, Package):
					if node.operation == "merge":
						unsatisfied_install = True
					if node in traversed:
					dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._dynamic_config._serialized_tasks_cache = None
			except self._unknown_internal_error:
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		# NOTE(review): the args initialization, the favorites loop
		# header and several continue/try lines are elided in this
		# excerpt -- confirm against the full source.
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		getSetAtoms = root_config.setconfig.getSetAtoms
		sets = root_config.sets
			# Only string entries are meaningful favorites.
			if not isinstance(x, basestring):
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				if s in self._dynamic_config._sets:
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._dynamic_config._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
			except portage.exception.InvalidAtom:
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		self._set_args(args)
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so the handler can
			# inspect the failed resume state.
			self.depgraph = depgraph
	class _internal_exception(portage.exception.PortageException):
		"""Base class for exceptions used internally by the depgraph."""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		may not be known.
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
	class _backtrack_mask(_internal_exception):
		"""
		This is raised by _show_unsatisfied_dep() when it's called with
		check_backtrack=True and a matching package has been masked by
		backtracking.
		"""
5001 def need_restart(self):
5002 return self._dynamic_config._need_restart
5004 def get_runtime_pkg_mask(self):
5005 return self._dynamic_config._runtime_pkg_mask.copy()
5007 class _dep_check_composite_db(portage.dbapi):
5009 A dbapi-like interface that is optimized for use in dep_check() calls.
5010 This is built on top of the existing depgraph package selection logic.
5011 Some packages that have been added to the graph may be masked from this
5012 view in order to influence the atom preference selection that occurs
	def __init__(self, depgraph, root):
		"""
		@param depgraph: depgraph instance whose package selection
			logic backs this dbapi-like view
		@param root: ROOT whose trees this view consults
		"""
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# NOTE(review): the assignment of self._root (read later by
		# match() and _visible()) is elided in this excerpt -- confirm.
		self._match_cache = {}
		self._cpv_pkg_map = {}
5022 def _clear_cache(self):
5023 self._match_cache.clear()
5024 self._cpv_pkg_map.clear()
	def match(self, atom):
		"""
		Return matching cpvs for atom via the depgraph's package
		selection logic, memoizing results in self._match_cache and
		recording cpv -> Package mappings in self._cpv_pkg_map.
		"""
		ret = self._match_cache.get(atom)
		# NOTE(review): the early cache-hit return, the orig_atom
		# binding (used below) and the slots/ret initializations are
		# elided in this excerpt -- confirm against the full source.
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
			# Select one package per remaining slot.
			slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret
5075 def _visible(self, pkg):
5076 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
5078 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
5079 except (StopIteration, portage.exception.InvalidDependString):
5086 self._depgraph._frozen_config.pkgsettings[pkg.root], pkg):
5088 except portage.exception.InvalidDependString:
5090 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
5091 self._root].get(pkg.slot_atom)
5092 if in_graph is None:
5093 # Mask choices for packages which are not the highest visible
5094 # version within their slot (since they usually trigger slot
5096 highest_visible, in_graph = self._depgraph._select_package(
5097 self._root, pkg.slot_atom)
5098 if pkg != highest_visible:
5100 elif in_graph != pkg:
5101 # Mask choices for packages that would trigger a slot
5102 # conflict with a previously selected package.
5106 def _dep_expand(self, atom):
5108 This is only needed for old installed packages that may
5109 contain atoms that are not fully qualified with a specific
5110 category. Emulate the cpv_expand() function that's used by
5111 dbapi.match() in cases like this. If there are multiple
5112 matches, it's often due to a new-style virtual that has
5113 been added, so try to filter those out to avoid raising
5116 root_config = self._depgraph.roots[self._root]
5118 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5119 if len(expanded_atoms) > 1:
5120 non_virtual_atoms = []
5121 for x in expanded_atoms:
5122 if not portage.dep_getkey(x).startswith("virtual/"):
5123 non_virtual_atoms.append(x)
5124 if len(non_virtual_atoms) == 1:
5125 expanded_atoms = non_virtual_atoms
5126 if len(expanded_atoms) > 1:
5127 # compatible with portage.cpv_expand()
5128 raise portage.exception.AmbiguousPackageName(
5129 [portage.dep_getkey(x) for x in expanded_atoms])
5131 atom = expanded_atoms[0]
5133 null_atom = Atom(insert_category_into_atom(atom, "null"))
5134 cat, atom_pn = portage.catsplit(null_atom.cp)
5135 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5137 # Allow the resolver to choose which virtual.
5138 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
5143 def aux_get(self, cpv, wants):
5144 metadata = self._cpv_pkg_map[cpv].metadata
5145 return [metadata.get(x, "") for x in wants]
5147 def match_pkgs(self, atom):
5148 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
# Report an ambiguous short ebuild name to the user. In --quiet mode a
# plain sorted list of fully-qualified names is printed; otherwise the
# search machinery is used so each candidate is shown with its description.
# NOTE(review): listing is lossy -- original lines 5151, 5157-5158, 5163
# and 5167-5168 (including the quiet-mode early return and the per-cp
# search invocation) are missing from view.
5150 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
5152 if "--quiet" in myopts:
5153 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5154 print("!!! one of the following fully-qualified ebuild names instead:\n")
5155 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5156 print(" " + colorize("INFORM", cp))
# Verbose mode: drive the search UI so descriptions are displayed.
5159 s = search(root_config, spinner, "--searchdesc" in myopts,
5160 "--quiet" not in myopts, "--usepkg" in myopts,
5161 "--usepkgonly" in myopts)
# Derive the bare package name by expanding with a "null" category.
5162 null_cp = portage.dep_getkey(insert_category_into_atom(
5164 cat, atom_pn = portage.catsplit(null_cp)
5165 s.searchkey = atom_pn
5166 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5169 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5170 print("!!! one of the above fully-qualified ebuild names instead.\n")
def insert_category_into_atom(atom, category):
	"""
	Insert a category in front of the package-name portion of an atom.

	The category is spliced in immediately before the first alphanumeric
	character, so any leading operators are preserved:
	">=foo-1.0" -> ">=category/foo-1.0".

	@param atom: a package atom string without a category
	@param category: the category name to insert (e.g. "null", "virtual")
	@returns: the qualified atom string, or None if the atom contains no
		alphanumeric character to anchor the category on
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum is None:
		# Nothing resembling a package name; callers treat None as
		# "could not qualify this atom".
		return None
	return atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
# Repeatedly build a depgraph for the given files/sets, backtracking with a
# runtime package mask whenever the resolver requests a restart, until the
# calculation succeeds or the backtrack limit is hit (at which point
# backtracking is disabled and a final plain run produces error output).
# NOTE(review): listing is lossy -- lines initializing `backtracked` and
# `backtrack_max`, the enclosing retry loop, and the loop-exit breaks
# (original lines 5183-5186, 5189-5192, 5198, 5201, 5211-5214) are missing.
5181 def backtrack_depgraph(settings, trees, myopts, myparams,
5182 myaction, myfiles, spinner):
5184 Raises PackageSetNotFound if myfiles contains a missing package set.
# State threaded through the retry loop: the mask of packages found to
# cause conflicts, and whether another backtrack attempt is permitted.
5187 runtime_pkg_mask = None
5188 allow_backtracking = True
5190 frozen_config = _frozen_depgraph_config(settings, trees,
5193 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5194 frozen_config=frozen_config,
5195 allow_backtracking=allow_backtracking,
5196 runtime_pkg_mask=runtime_pkg_mask)
5197 success, favorites = mydepgraph.select_files(myfiles)
# Resolver asked for a restart: retry with the accumulated mask.
5199 if mydepgraph.need_restart() and backtracked < backtrack_max:
5200 runtime_pkg_mask = mydepgraph.get_runtime_pkg_mask()
5202 elif backtracked and allow_backtracking:
5203 if "--debug" in myopts:
5205 "\n\nbacktracking aborted after %s tries\n\n" % \
5206 backtracked, noiselevel=-1, level=logging.DEBUG)
5207 # Backtracking failed, so disable it and do
5208 # a plain dep calculation + error message.
5209 allow_backtracking = False
5210 runtime_pkg_mask = None
5215 return (success, mydepgraph, favorites)
# Rebuild a depgraph from mtimedb["resume"]["mergelist"]. When resume deps
# are unsatisfied and skipping is allowed, packages with unsatisfied deps
# (and any merge-scheduled ancestors that depend on them) are pruned from
# the mergelist and the load is retried; pruned tasks are reported in
# dropped_tasks.
# NOTE(review): listing is lossy -- the retry loop header, `skip_masked`
# initialization, `raise`/`break`/`continue` statements and several other
# lines (e.g. 5218, 5221-5224, 5229-5230, 5233, 5238-5239, 5242, 5248,
# 5258-5259, 5265, 5267, 5271, 5276, 5278, 5285, 5288-5290) are absent.
5217 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5219 Construct a depgraph for the given resume list. This will raise
5220 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
5222 @returns: (success, depgraph, dropped_tasks)
5225 skip_unsatisfied = True
5226 mergelist = mtimedb["resume"]["mergelist"]
5227 dropped_tasks = set()
5228 frozen_config = _frozen_depgraph_config(settings, trees,
5231 mydepgraph = depgraph(settings, trees,
5232 myopts, myparams, spinner, frozen_config=frozen_config)
5234 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
5235 skip_masked=skip_masked)
5236 except depgraph.UnsatisfiedResumeDep as e:
5237 if not skip_unsatisfied:
# Walk ancestors of each unsatisfied parent so that dropping a package
# also drops merge-scheduled packages that would become unsatisfied.
5240 graph = mydepgraph._dynamic_config.digraph
5241 unsatisfied_parents = dict((dep.parent, dep.parent) \
5243 traversed_nodes = set()
5244 unsatisfied_stack = list(unsatisfied_parents)
5245 while unsatisfied_stack:
5246 pkg = unsatisfied_stack.pop()
5247 if pkg in traversed_nodes:
5249 traversed_nodes.add(pkg)
5251 # If this package was pulled in by a parent
5252 # package scheduled for merge, removing this
5253 # package may cause the parent package's
5254 # dependency to become unsatisfied.
5255 for parent_node in graph.parent_nodes(pkg):
5256 if not isinstance(parent_node, Package) \
5257 or parent_node.operation not in ("merge", "nomerge"):
# Only soft (satisfied) deps are ignorable; anything harder counts.
5260 graph.child_nodes(parent_node,
5261 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5262 if pkg in unsatisfied:
5263 unsatisfied_parents[parent_node] = parent_node
5264 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the pruned packages.
5266 pruned_mergelist = []
5268 if isinstance(x, list) and \
5269 tuple(x) not in unsatisfied_parents:
5270 pruned_mergelist.append(x)
5272 # If the mergelist doesn't shrink then this loop is infinite.
5273 if len(pruned_mergelist) == len(mergelist):
5274 # This happens if a package can't be dropped because
5275 # it's already installed, but it has unsatisfied PDEPEND.
5277 mergelist[:] = pruned_mergelist
5279 # Exclude installed packages that have been removed from the graph due
5280 # to failure to build/install runtime dependencies after the dependent
5281 # package has already been installed.
5282 dropped_tasks.update(pkg for pkg in \
5283 unsatisfied_parents if pkg.operation != "nomerge")
# Break references so the pruned Package instances can be collected.
5284 mydepgraph.break_refs(unsatisfied_parents)
5286 del e, graph, traversed_nodes, \
5287 unsatisfied_parents, unsatisfied_stack
5291 return (success, mydepgraph, dropped_tasks)
# Fetch metadata for cpv from the given db and compute why it is masked.
# Returns (metadata, mreasons); metadata is None (reported as "corruption")
# when aux_get fails, and an unsupported EAPI short-circuits the normal
# masking-status computation.
# NOTE(review): listing is lossy -- the try/except around aux_get and the
# else-branches (original lines 5295-5296, 5299-5301, 5304, 5306-5307,
# 5310) are missing from view.
5293 def get_mask_info(root_config, cpv, pkgsettings,
5294 db, pkg_type, built, installed, db_keys):
5297 metadata = dict(zip(db_keys,
5298 db.aux_get(cpv, db_keys)))
5302 if metadata is None:
5303 mreasons = ["corruption"]
5305 eapi = metadata['EAPI']
# An unsupported EAPI is itself the mask reason; no further checks.
5308 if not portage.eapi_is_supported(eapi):
5309 mreasons = ['EAPI %s' % eapi]
5311 pkg = Package(type_name=pkg_type, root_config=root_config,
5312 cpv=cpv, built=built, installed=installed, metadata=metadata)
5313 mreasons = get_masking_status(pkg, pkgsettings, root_config)
5314 return metadata, mreasons
# Print each masked package with its mask reasons, the package.mask file
# comment (once per distinct comment), and license-file locations (once per
# license). Returns True if any package was masked by an unsupported EAPI.
# NOTE(review): listing is lossy -- the `shown_cpvs = set()` initialization,
# `continue` statements and writemsg_stdout noiselevel arguments (original
# lines 5321, 5326-5327, 5337, 5340, 5343, 5347-5348, 5350, 5353, 5359,
# 5362-5363) are missing from view.
5316 def show_masked_packages(masked_packages):
5317 shown_licenses = set()
5318 shown_comments = set()
5319 # Maybe there is both an ebuild and a binary. Only
5320 # show one of them to avoid redundant appearance.
5322 have_eapi_mask = False
5323 for (root_config, pkgsettings, cpv,
5324 metadata, mreasons) in masked_packages:
5325 if cpv in shown_cpvs:
5328 comment, filename = None, None
5329 if "package.mask" in mreasons:
5330 comment, filename = \
5331 portage.getmaskingreason(
5332 cpv, metadata=metadata,
5333 settings=pkgsettings,
5334 portdb=root_config.trees["porttree"].dbapi,
5335 return_location=True)
5336 missing_licenses = []
# Unsupported EAPI implies metadata may be unusable for license checks.
5338 if not portage.eapi_is_supported(metadata["EAPI"]):
5339 have_eapi_mask = True
5341 missing_licenses = \
5342 pkgsettings._getMissingLicenses(
5344 except portage.exception.InvalidDependString:
5345 # This will have already been reported
5346 # above via mreasons.
5349 print("- "+cpv+" (masked by: "+", ".join(mreasons)+")")
# Show each distinct package.mask comment only once.
5351 if comment and comment not in shown_comments:
5352 writemsg_stdout(filename + ":\n" + comment + "\n",
5354 shown_comments.add(comment)
5355 portdb = root_config.trees["porttree"].dbapi
5356 for l in missing_licenses:
5357 l_path = portdb.findLicensePath(l)
5358 if l in shown_licenses:
5360 msg = ("A copy of the '%s' license" + \
5361 " is located at '%s'.") % (l, l_path)
5364 shown_licenses.add(l)
5365 return have_eapi_mask
def show_mask_docs():
	"""Print a pointer to documentation about masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	)
	for doc_line in doc_lines:
		print(doc_line)
def filter_iuse_defaults(iuse):
	"""
	Generator that yields each IUSE token with any leading "+" or "-"
	default-enable/disable marker (EAPI-1 style IUSE defaults) stripped.

	@param iuse: iterable of IUSE flag strings
	@returns: iterator over the bare flag names
	"""
	for flag in iuse:
		# startswith() accepts a tuple, covering both markers in one call.
		if flag.startswith(("+", "-")):
			yield flag[1:]
		else:
			yield flag
# Print a pointer to the Gentoo Handbook section on blocked packages,
# with "Blocked Packages" highlighted via the BAD color function.
# NOTE(review): listing is lossy -- original lines 5379, 5382 and 5384
# (presumably blank-line print() calls framing the message) are missing.
5378 def show_blocker_docs_link():
5380 print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
5381 print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
5383 print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
5386 def get_masking_status(pkg, pkgsettings, root_config):
5388 mreasons = portage.getmaskingstatus(
5389 pkg, settings=pkgsettings,
5390 portdb=root_config.trees["porttree"].dbapi)
5392 if not pkg.installed:
5393 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5394 mreasons.append("CHOST: %s" % \
5395 pkg.metadata["CHOST"])
5397 for msg_type, msgs in pkg.invalid.items():
5399 mreasons.append("invalid: %s" % (msg,))
5401 if not pkg.metadata["SLOT"]:
5402 mreasons.append("invalid: SLOT is undefined")