1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 from __future__ import print_function
12 from itertools import chain
15 from portage import os
16 from portage import digraph
17 from portage.dep import Atom
18 from portage.output import bold, blue, colorize, create_color_func, darkblue, \
19 darkgreen, green, nc_len, red, teal, turquoise, yellow
20 bad = create_color_func("BAD")
21 from portage._sets import SETPREFIX
22 from portage._sets.base import InternalPackageSet
23 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
24 from portage.util import writemsg_level
26 from _emerge.AtomArg import AtomArg
27 from _emerge.Blocker import Blocker
28 from _emerge.BlockerCache import BlockerCache
29 from _emerge.BlockerDepPriority import BlockerDepPriority
30 from _emerge.changelog import calc_changelog
31 from _emerge.countdown import countdown
32 from _emerge.create_world_atom import create_world_atom
33 from _emerge.Dependency import Dependency
34 from _emerge.DependencyArg import DependencyArg
35 from _emerge.DepPriority import DepPriority
36 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
37 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
38 from _emerge.FakeVartree import FakeVartree
39 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
40 from _emerge.format_size import format_size
41 from _emerge.is_valid_package_atom import is_valid_package_atom
42 from _emerge.Package import Package
43 from _emerge.PackageArg import PackageArg
44 from _emerge.PackageCounters import PackageCounters
45 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
46 from _emerge.RepoDisplay import RepoDisplay
47 from _emerge.RootConfig import RootConfig
48 from _emerge.search import search
49 from _emerge.SetArg import SetArg
50 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
51 from _emerge.UnmergeDepPriority import UnmergeDepPriority
52 from _emerge.visible import visible
# Python-3 compatibility guard. The guarded body is on a line missing from
# this excerpt (orig line 55+) -- presumably a basestring/str alias; confirm
# against upstream portage before relying on it.
54 if sys.hexversion >= 0x3000000:
# Immutable (per-resolver-run) depgraph configuration: settings, trees and
# options that are shared between a depgraph and any backtracking depgraphs
# derived from it. NOTE(review): this excerpt is line-sampled; original lines
# 59, 63-64, 66, 71-72, 74, 76-77, 83 and 92 are missing, so several
# attribute initializations (e.g. self.trees, self.roots, self.pkgsettings,
# self._pkg_cache, the per-root loop header) are not visible here.
58 class _frozen_depgraph_config(object):
60 def __init__(self, settings, trees, myopts, spinner):
61 self.settings = settings
62 self.target_root = settings["ROOT"]
# PORTAGE_DEBUG=1 toggles an edebug flag (assignment on a missing line).
65 if settings.get("PORTAGE_DEBUG", "") == "1":
67 self.spinner = spinner
68 self._running_root = trees["/"]["root_config"]
# Options that make a restart of the merge list unnecessary.
69 self._opts_no_restart = frozenset(["--buildpkgonly",
70 "--fetchonly", "--fetch-all-uri", "--pretend"])
# Keep a reference to the unmodified tree structures; the copies below
# substitute a FakeVartree for the real vartree.
73 self._trees_orig = trees
75 # All Package instances
78 self.trees[myroot] = {}
79 # Create a RootConfig instance that references
80 # the FakeVartree instead of the real one.
81 self.roots[myroot] = RootConfig(
82 trees[myroot]["vartree"].settings,
84 trees[myroot]["root_config"].setconfig)
85 for tree in ("porttree", "bintree"):
86 self.trees[myroot][tree] = trees[myroot][tree]
87 self.trees[myroot]["vartree"] = \
88 FakeVartree(trees[myroot]["root_config"],
89 pkg_cache=self._pkg_cache)
90 self.pkgsettings[myroot] = portage.config(
91 clone=self.trees[myroot]["vartree"].settings)
# Sets that must always be satisfied by the resolver.
93 self._required_set_names = set(["world"])
# Mutable per-resolution state of a depgraph: the dependency digraph itself,
# blocker bookkeeping, slot-conflict tracking and the per-root fake/filtered
# dbapi layers used by dep_check(). NOTE(review): line-sampled excerpt --
# original lines such as 96, 98, 106, 111, 120, 179-182, 191-192, 196, 206,
# 216-217 are missing (notably self._sets, self.mydbapi, self._dep_stack,
# graph_tree/filtered_tree construction and the dbs list initialization).
95 class _dynamic_depgraph_config(object):
97 def __init__(self, depgraph, myparams, allow_backtracking,
99 self.myparams = myparams.copy()
100 self._vdb_loaded = False
101 self._allow_backtracking = allow_backtracking
102 # Maps slot atom to package for each Package added to the graph.
103 self._slot_pkg_map = {}
104 # Maps nodes to the reasons they were selected for reinstallation.
105 self._reinstall_nodes = {}
107 # Contains a filtered view of preferred packages that are selected
108 # from available repositories.
109 self._filtered_trees = {}
110 # Contains installed packages and new packages that have been added
112 self._graph_trees = {}
113 # Caches visible packages returned from _select_package, for use in
114 # depgraph._iter_atoms_for_pkg() SLOT logic.
115 self._visible_pkgs = {}
116 # contains the args created by select_files
117 self._initial_arg_list = []
118 self.digraph = portage.digraph()
119 # contains all sets added to the graph
121 # contains atoms given as arguments
122 self._sets["args"] = InternalPackageSet()
123 # contains all atoms from all sets added to the graph, including
124 # atoms given as arguments
125 self._set_atoms = InternalPackageSet()
126 self._atom_arg_map = {}
127 # contains all nodes pulled in by self._set_atoms
128 self._set_nodes = set()
129 # Contains only Blocker -> Uninstall edges
130 self._blocker_uninstalls = digraph()
131 # Contains only Package -> Blocker edges
132 self._blocker_parents = digraph()
133 # Contains only irrelevant Package -> Blocker edges
134 self._irrelevant_blockers = digraph()
135 # Contains only unsolvable Package -> Blocker edges
136 self._unsolvable_blockers = digraph()
137 # Contains all Blocker -> Blocked Package edges
138 self._blocked_pkgs = digraph()
139 # Contains world packages that have been protected from
140 # uninstallation but may not have been added to the graph
141 # if the graph is not complete yet.
142 self._blocked_world_pkgs = {}
143 self._slot_collision_info = {}
144 # Slot collision nodes are not allowed to block other packages since
145 # blocker validation is only able to account for one package per slot.
146 self._slot_collision_nodes = set()
147 self._parent_atoms = {}
148 self._slot_conflict_parent_atoms = set()
149 self._serialized_tasks_cache = None
150 self._scheduler_graph = None
151 self._displayed_list = None
152 self._pprovided_args = []
153 self._missing_args = []
154 self._masked_installed = set()
155 self._masked_license_updates = set()
156 self._unsatisfied_deps_for_display = []
157 self._unsatisfied_blockers_for_display = None
158 self._circular_deps_for_display = None
160 self._dep_disjunctive_stack = []
161 self._unsatisfied_deps = []
162 self._initially_unsatisfied_deps = []
163 self._ignored_deps = []
164 self._highest_pkg_cache = {}
# Deep-copy the mask passed in by a backtracking parent so that this
# depgraph can add entries without mutating the caller's dict.
165 if runtime_pkg_mask is None:
166 runtime_pkg_mask = {}
168 runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
169 runtime_pkg_mask.items())
170 self._runtime_pkg_mask = runtime_pkg_mask
171 self._need_restart = False
173 for myroot in depgraph._frozen_config.trees:
174 self._slot_pkg_map[myroot] = {}
175 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
176 # This dbapi instance will model the state that the vdb will
177 # have after new packages have been installed.
178 fakedb = PackageVirtualDbapi(vardb.settings)
180 self.mydbapi[myroot] = fakedb
183 graph_tree.dbapi = fakedb
184 self._graph_trees[myroot] = {}
185 self._filtered_trees[myroot] = {}
186 # Substitute the graph tree for the vartree in dep_check() since we
187 # want atom selections to be consistent with package selections
188 # have already been made.
189 self._graph_trees[myroot]["porttree"] = graph_tree
190 self._graph_trees[myroot]["vartree"] = graph_tree
193 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
194 self._filtered_trees[myroot]["porttree"] = filtered_tree
195 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
197 # Passing in graph_tree as the vartree here could lead to better
198 # atom selections in some cases by causing atoms for packages that
199 # have been added to the graph to be preferred over other choices.
200 # However, it can trigger atom selections that result in
201 # unresolvable direct circular dependencies. For example, this
202 # happens with gwydion-dylan which depends on either itself or
203 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
204 # gwydion-dylan-bin needs to be selected in order to avoid a
205 # an unresolvable direct circular dependency.
207 # To solve the problem described above, pass in "graph_db" so that
208 # packages that have been added to the graph are distinguishable
209 # from other available packages and installed packages. Also, pass
210 # the parent package into self._select_atoms() calls so that
211 # unresolvable direct circular dependencies can be detected and
212 # avoided when possible.
213 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
214 self._filtered_trees[myroot]["vartree"] = \
215 depgraph._frozen_config.trees[myroot]["vartree"]
218 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
219 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
220 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
221 # (db, pkg_type, built, installed, db_keys)
# Candidate-source ordering: ebuilds first (unless --usepkgonly), then
# binary packages (with --usepkg), then the installed vdb.
222 if "--usepkgonly" not in depgraph._frozen_config.myopts:
223 db_keys = list(portdb._aux_cache_keys)
224 dbs.append((portdb, "ebuild", False, False, db_keys))
225 if "--usepkg" in depgraph._frozen_config.myopts:
226 db_keys = list(bindb._aux_cache_keys)
227 dbs.append((bindb, "binary", True, False, db_keys))
228 db_keys = list(depgraph._frozen_config._trees_orig[myroot
229 ]["vartree"].dbapi._aux_cache_keys)
230 dbs.append((vardb, "installed", True, True, db_keys))
231 self._filtered_trees[myroot]["dbs"] = dbs
# Ensure the binary package index is populated when binaries may be used.
232 if "--usepkg" in depgraph._frozen_config.myopts:
233 depgraph._frozen_config._trees_orig[myroot
234 ]["bintree"].populate(
235 "--getbinpkg" in depgraph._frozen_config.myopts,
236 "--getbinpkgonly" in depgraph._frozen_config.myopts)
# The dependency resolver itself. The class continues well beyond this
# excerpt; only the header and two class-level constants are visible here.
238 class depgraph(object):
# Maps package type names to tree names (shared with RootConfig).
240 pkg_tree_map = RootConfig.pkg_tree_map
# Dependency metadata keys examined when walking a package's deps.
242 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Build a resolver from frozen (shared, reusable) and dynamic (per-run)
# configuration. Passing frozen_config lets backtracking runs reuse the
# expensive FakeVartree state. NOTE(review): the _frozen_depgraph_config()
# call is truncated (orig line 248 missing -- presumably myopts/spinner args).
244 def __init__(self, settings, trees, myopts, myparams, spinner,
245 frozen_config=None, runtime_pkg_mask=None, allow_backtracking=False):
246 if frozen_config is None:
247 frozen_config = _frozen_depgraph_config(settings, trees,
249 self._frozen_config = frozen_config
250 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
251 allow_backtracking, runtime_pkg_mask)
# Indirection points so selection strategy could be swapped out.
253 self._select_atoms = self._select_atoms_highest_available
254 self._select_package = self._select_pkg_highest_available
# NOTE(review): this is the body of _load_vdb(); its "def" line and the
# docstring quotes are on lines missing from this excerpt (orig 255-257,
# 262-263). It lazily caches installed-package (vdb) metadata on demand.
258 Load installed package metadata if appropriate. This used to be called
259 from the constructor, but that wasn't very nice since this procedure
260 is slow and it generates spinner output. So, now it's called on-demand
261 by various methods when necessary.
# Idempotent: bail out if a previous call already loaded the vdb.
264 if self._dynamic_config._vdb_loaded:
267 for myroot in self._frozen_config.trees:
# Preloading is pointless when dependencies won't be traversed.
269 preload_installed_pkgs = \
270 "--nodeps" not in self._frozen_config.myopts and \
271 "--buildpkgonly" not in self._frozen_config.myopts
273 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
274 if not fake_vartree.dbapi:
275 # This needs to be called for the first depgraph, but not for
276 # backtracking depgraphs that share the same frozen_config.
279 if preload_installed_pkgs:
280 vardb = fake_vartree.dbapi
281 fakedb = self._dynamic_config._graph_trees[
282 myroot]["vartree"].dbapi
# Loop header over installed packages is on a missing line (orig 283-284).
285 self._spinner_update()
286 # This triggers metadata updates via FakeVartree.
287 vardb.aux_get(pkg.cpv, [])
288 fakedb.cpv_inject(pkg)
290 # Now that the vardb state is cached in our FakeVartree,
291 # we won't be needing the real vartree cache for awhile.
292 # To make some room on the heap, clear the vardbapi
294 self._frozen_config._trees_orig[myroot
295 ]["vartree"].dbapi._clear_cache()
298 self._dynamic_config._vdb_loaded = True
def _spinner_update(self):
	"""Advance the progress spinner, when one was configured."""
	spinner = self._frozen_config.spinner
	if spinner:
		spinner.update()
# Report updates that were skipped (masked at runtime) during resolution,
# grouped by mask reason and reduced to the highest candidate per SLOT.
# NOTE(review): line-sampled; e.g. the missed_updates dict initialization
# and several guard lines (orig 305, 308-309, 312, 315, 318, 322-323,
# 325-326) are missing.
304 def _show_missed_update(self):
# Suppress this report in quiet mode unless debugging.
306 if '--quiet' in self._frozen_config.myopts and \
307 '--debug' not in self._frozen_config.myopts:
310 # In order to minimize noise, show only the highest
311 # missed update from each SLOT.
313 for pkg, mask_reasons in \
314 self._dynamic_config._runtime_pkg_mask.items():
316 # Exclude installed here since we only
317 # want to show available updates.
319 k = (pkg.root, pkg.slot_atom)
320 if k in missed_updates:
321 other_pkg, mask_type, parent_atoms = missed_updates[k]
324 for mask_type, parent_atoms in mask_reasons.items():
327 missed_updates[k] = (pkg, mask_type, parent_atoms)
330 if not missed_updates:
# Group by mask reason so each reason gets its own display routine.
333 missed_update_types = {}
334 for pkg, mask_type, parent_atoms in missed_updates.values():
335 missed_update_types.setdefault(mask_type,
336 []).append((pkg, parent_atoms))
338 self._show_missed_update_slot_conflicts(
339 missed_update_types.get("slot conflict"))
341 self._show_missed_update_unsatisfied_dep(
342 missed_update_types.get("missing dependency"))
# Display updates skipped because of unsatisfied dependencies, writing to
# stderr. Updates masked by backtracking are collected and shown in an
# abbreviated form to avoid terminal flooding. NOTE(review): the "try:"
# matching the "except self._backtrack_mask" below is on a missing line
# (orig 354), as are several blank/guard lines.
344 def _show_missed_update_unsatisfied_dep(self, missed_updates):
346 if not missed_updates:
349 write = sys.stderr.write
350 backtrack_masked = []
352 for pkg, parent_atoms in missed_updates:
# Probe each unsatisfied dep; _show_unsatisfied_dep raises
# self._backtrack_mask when check_backtrack=True detects a
# backtracking mask (try: on a missing line).
355 for parent, root, atom in parent_atoms:
356 self._show_unsatisfied_dep(root, atom, myparent=parent,
357 check_backtrack=True)
358 except self._backtrack_mask:
359 # This is displayed below in abbreviated form.
360 backtrack_masked.append((pkg, parent_atoms))
363 write("\n!!! The following update has been skipped " + \
364 "due to unsatisfied dependencies:\n\n")
366 write(str(pkg.slot_atom))
368 write(" for %s" % (pkg.root,))
# Full per-dependency explanation for non-backtrack-masked updates.
371 for parent, root, atom in parent_atoms:
372 self._show_unsatisfied_dep(root, atom, myparent=parent)
376 # These are shown in abbreviated form, in order to avoid terminal
377 # flooding from mask messages as reported in bug #285832.
378 write("\n!!! The following update(s) have been skipped " + \
379 "due to unsatisfied dependencies\n" + \
380 "!!! triggered by backtracking:\n\n")
381 for pkg, parent_atoms in backtrack_masked:
382 write(str(pkg.slot_atom))
384 write(" for %s" % (pkg.root,))
# Display updates skipped due to slot conflicts, listing for each skipped
# package the parent/atom pairs that conflicted. Output goes to stderr.
# NOTE(review): line-sampled; the msg list initialization and some loop
# detail lines (orig 390, 392-394, 397-398, 401, 403-404, 406-408, 410,
# 416, 418, 420-422) are missing.
389 def _show_missed_update_slot_conflicts(self, missed_updates):
391 if not missed_updates:
395 msg.append("\n!!! One or more updates have been skipped due to " + \
396 "a dependency conflict:\n\n")
399 for pkg, parent_atoms in missed_updates:
400 msg.append(str(pkg.slot_atom))
402 msg.append(" for %s" % (pkg.root,))
405 for parent, atom in parent_atoms:
409 msg.append(" conflicts with\n")
411 if isinstance(parent,
412 (PackageArg, AtomArg)):
413 # For PackageArg and AtomArg types, it's
414 # redundant to display the atom attribute.
415 msg.append(str(parent))
417 # Display the specific atom from SetArg or
419 msg.append("%s required by %s" % (atom, parent))
423 sys.stderr.write("".join(msg))
# Print the interactive slot-conflict notice: for each colliding slot, the
# conflicting package instances and a pruned list of the parents/atoms that
# pulled them in, followed (when derivable) by a generated explanation and
# general advice. NOTE(review): line-sampled; max_parents, pruned_list,
# indent and the explanations counter are initialized on missing lines.
426 def _show_slot_collision_notice(self):
427 """Show an informational message advising the user to mask one of the
428 the packages. In some cases it may be possible to resolve this
429 automatically, but support for backtracking (removal nodes that have
430 already been selected) will be required in order to handle all possible
434 if not self._dynamic_config._slot_collision_info:
437 self._show_merge_list()
440 msg.append("\n!!! Multiple package instances within a single " + \
441 "package slot have been pulled\n")
442 msg.append("!!! into the dependency graph, resulting" + \
443 " in a slot conflict:\n\n")
445 # Max number of parents shown, to avoid flooding the display.
447 explanation_columns = 70
449 for (slot_atom, root), slot_nodes \
450 in self._dynamic_config._slot_collision_info.items():
451 msg.append(str(slot_atom))
453 msg.append(" for %s" % (root,))
456 for node in slot_nodes:
458 msg.append(str(node))
459 parent_atoms = self._dynamic_config._parent_atoms.get(node)
462 # Prefer conflict atoms over others.
463 for parent_atom in parent_atoms:
464 if len(pruned_list) >= max_parents:
466 if parent_atom in self._dynamic_config._slot_conflict_parent_atoms:
467 pruned_list.add(parent_atom)
469 # If this package was pulled in by conflict atoms then
470 # show those alone since those are the most interesting.
472 # When generating the pruned list, prefer instances
473 # of DependencyArg over instances of Package.
474 for parent_atom in parent_atoms:
475 if len(pruned_list) >= max_parents:
477 parent, atom = parent_atom
478 if isinstance(parent, DependencyArg):
479 pruned_list.add(parent_atom)
480 # Prefer Packages instances that themselves have been
481 # pulled into collision slots.
482 for parent_atom in parent_atoms:
483 if len(pruned_list) >= max_parents:
485 parent, atom = parent_atom
486 if isinstance(parent, Package) and \
487 (parent.slot_atom, parent.root) \
488 in self._dynamic_config._slot_collision_info:
489 pruned_list.add(parent_atom)
# Fallback: fill the remaining display slots with any parents.
490 for parent_atom in parent_atoms:
491 if len(pruned_list) >= max_parents:
493 pruned_list.add(parent_atom)
494 omitted_parents = len(parent_atoms) - len(pruned_list)
495 parent_atoms = pruned_list
496 msg.append(" pulled in by\n")
497 for parent_atom in parent_atoms:
498 parent, atom = parent_atom
500 if isinstance(parent,
501 (PackageArg, AtomArg)):
502 # For PackageArg and AtomArg types, it's
503 # redundant to display the atom attribute.
504 msg.append(str(parent))
506 # Display the specific atom from SetArg or
508 msg.append("%s required by %s" % (atom, parent))
512 msg.append("(and %d more)\n" % omitted_parents)
514 msg.append(" (no parents)\n")
516 explanation = self._slot_conflict_explanation(slot_nodes)
519 msg.append(indent + "Explanation:\n\n")
520 for line in textwrap.wrap(explanation, explanation_columns):
521 msg.append(2*indent + line + "\n")
524 sys.stderr.write("".join(msg))
# Only print the generic advice when not every conflict got a
# specific explanation (and not in quiet mode).
527 explanations_for_all = explanations == len(self._dynamic_config._slot_collision_info)
529 if explanations_for_all or "--quiet" in self._frozen_config.myopts:
533 msg.append("It may be possible to solve this problem ")
534 msg.append("by using package.mask to prevent one of ")
535 msg.append("those packages from being selected. ")
536 msg.append("However, it is also possible that conflicting ")
537 msg.append("dependencies exist such that they are impossible to ")
538 msg.append("satisfy simultaneously. If such a conflict exists in ")
539 msg.append("the dependencies of two different packages, then those ")
540 msg.append("packages can not be installed simultaneously.")
# NOTE: the formatter module is deprecated/removed in modern Python;
# kept here because this code targets Python 2-era portage.
542 from formatter import AbstractFormatter, DumbWriter
543 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
545 f.add_flowing_data(x)
549 msg.append("For more information, see MASKED PACKAGES ")
550 msg.append("section in the emerge man page or refer ")
551 msg.append("to the Gentoo Handbook.")
553 f.add_flowing_data(x)
# Generate a human-readable explanation string for a two-package USE-dep
# slot conflict (returns an explanation for the cases described in the
# docstring, None implicitly otherwise). NOTE(review): line-sampled; the
# docstring quotes, matched_node initialization and several return/continue
# lines are on missing original lines.
557 def _slot_conflict_explanation(self, slot_nodes):
559 When a slot conflict occurs due to USE deps, there are a few
560 different cases to consider:
562 1) New USE are correctly set but --newuse wasn't requested so an
563 installed package with incorrect USE happened to get pulled
564 into graph before the new one.
566 2) New USE are incorrectly set but an installed package has correct
567 USE so it got pulled into the graph, and a new instance also got
568 pulled in due to --newuse or an upgrade.
570 3) Multiple USE deps exist that can't be satisfied simultaneously,
571 and multiple package instances got pulled into the same slot to
572 satisfy the conflicting deps.
574 Currently, explanations and suggested courses of action are generated
575 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
578 if len(slot_nodes) != 2:
579 # Suggestions are only implemented for
580 # conflicts between two packages.
583 all_conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms
# Classify the two nodes: one matched by the conflict atoms, one not.
586 unmatched_node = None
587 for node in slot_nodes:
588 parent_atoms = self._dynamic_config._parent_atoms.get(node)
590 # Normally, there are always parent atoms. If there are
591 # none then something unexpected is happening and there's
592 # currently no suggestion for this case.
594 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
595 for parent_atom in conflict_atoms:
596 parent, atom = parent_atom
598 # Suggestions are currently only implemented for cases
599 # in which all conflict atoms have USE deps.
602 if matched_node is not None:
603 # If conflict atoms match multiple nodes
604 # then there's no suggestion.
607 matched_atoms = conflict_atoms
609 if unmatched_node is not None:
610 # Neither node is matched by conflict atoms, and
611 # there is no suggestion for this case.
613 unmatched_node = node
615 if matched_node is None or unmatched_node is None:
616 # This shouldn't happen.
# Case 1: same version, only USE differs -> suggest --newuse.
619 if unmatched_node.installed and not matched_node.installed and \
620 unmatched_node.cpv == matched_node.cpv:
621 # If the conflicting packages are the same version then
622 # --newuse should be all that's needed. If they are different
623 # versions then there's some other problem.
624 return "New USE are correctly set, but --newuse wasn't" + \
625 " requested, so an installed package with incorrect USE " + \
626 "happened to get pulled into the dependency graph. " + \
627 "In order to solve " + \
628 "this, either specify the --newuse option or explicitly " + \
629 " reinstall '%s'." % matched_node.slot_atom
# Case 2: USE of the new instance is wrong -> suggest adjusting USE.
631 if matched_node.installed and not unmatched_node.installed:
632 atoms = sorted(set(atom for parent, atom in matched_atoms))
633 explanation = ("New USE for '%s' are incorrectly set. " + \
634 "In order to solve this, adjust USE to satisfy '%s'") % \
635 (matched_node.slot_atom, atoms[0])
637 for atom in atoms[1:-1]:
638 explanation += ", '%s'" % (atom,)
641 explanation += " and '%s'" % (atoms[-1],)
# After resolution, walk each colliding slot and record which parent atoms
# only match a subset of the slot's packages -- those are the atoms that
# actually cause the conflict (accumulated in _slot_conflict_parent_atoms).
647 def _process_slot_conflicts(self):
649 Process slot conflict data to identify specific atoms which
650 lead to conflict. These atoms only match a subset of the
651 packages that have been pulled into a given slot.
653 for (slot_atom, root), slot_nodes \
654 in self._dynamic_config._slot_collision_info.items():
# Union of the parent atoms of every package in this slot.
656 all_parent_atoms = set()
657 for pkg in slot_nodes:
658 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
661 all_parent_atoms.update(parent_atoms)
663 for pkg in slot_nodes:
664 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
665 if parent_atoms is None:
667 self._dynamic_config._parent_atoms[pkg] = parent_atoms
# Any atom from another slot-sibling that also matches this
# package is attributed to it; non-matching ones mark conflict.
668 for parent_atom in all_parent_atoms:
669 if parent_atom in parent_atoms:
671 # Use package set for matching since it will match via
672 # PROVIDE when necessary, while match_from_list does not.
673 parent, atom = parent_atom
674 atom_set = InternalPackageSet(
675 initial_atoms=(atom,))
676 if atom_set.findAtomForPackage(pkg):
677 parent_atoms.add(parent_atom)
679 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): the return statements (orig lines 691-699) are missing from
# this excerpt; per the docstring the computed 'flags' set (or None) is
# returned on those lines.
681 def _reinstall_for_flags(self, forced_flags,
682 orig_use, orig_iuse, cur_use, cur_iuse):
683 """Return a set of flags that trigger reinstallation, or None if there
684 are no such flags."""
# --newuse / --binpkg-respect-use: differences in IUSE membership
# (excluding forced flags) or in enabled-flag state both count.
685 if "--newuse" in self._frozen_config.myopts or \
686 "--binpkg-respect-use" in self._frozen_config.myopts:
687 flags = set(orig_iuse.symmetric_difference(
688 cur_iuse).difference(forced_flags))
689 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
690 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in enabled-flag state count.
693 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
694 flags = orig_iuse.intersection(orig_use).symmetric_difference(
695 cur_iuse.intersection(cur_use))
# Main resolution loop: drain the dependency stack (and the disjunctive-dep
# stack) until empty, dispatching Package nodes to _add_pkg_deps and other
# deps to _add_dep. NOTE(review): the early-return/failure branches (orig
# 705, 710-711, 713, 716-718) are on missing lines.
700 def _create_graph(self, allow_unsatisfied=False):
701 dep_stack = self._dynamic_config._dep_stack
702 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
703 while dep_stack or dep_disjunctive_stack:
704 self._spinner_update()
706 dep = dep_stack.pop()
707 if isinstance(dep, Package):
708 if not self._add_pkg_deps(dep,
709 allow_unsatisfied=allow_unsatisfied):
712 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Disjunctive (|| ...) deps are only expanded once the plain
# dep stack has been drained.
714 if dep_disjunctive_stack:
715 if not self._pop_disjunction(allow_unsatisfied):
# Resolve a single Dependency: turn blocker atoms into Blocker nodes, select
# a package satisfying the atom, trigger backtracking on unsatisfiable deps
# when allowed, and finally hand the selected package to _add_pkg.
# NOTE(review): line-sampled; many return statements and some condition
# lines (e.g. the blocker test at orig 728) are missing.
719 def _add_dep(self, dep, allow_unsatisfied=False):
720 debug = "--debug" in self._frozen_config.myopts
721 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
722 nodeps = "--nodeps" in self._frozen_config.myopts
723 empty = "empty" in self._dynamic_config.myparams
724 deep = self._dynamic_config.myparams.get("deep", 0)
# Recurse when in "empty" mode, unlimited --deep, or within depth.
725 recurse = empty or deep is True or dep.depth <= deep
727 if not buildpkgonly and \
729 dep.parent not in self._dynamic_config._slot_collision_nodes:
730 if dep.parent.onlydeps:
731 # It's safe to ignore blockers if the
732 # parent is an --onlydeps node.
734 # The blocker applies to the root where
735 # the parent is or will be installed.
736 blocker = Blocker(atom=dep.atom,
737 eapi=dep.parent.metadata["EAPI"],
738 root=dep.parent.root)
739 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
742 if dep.child is None:
743 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
744 onlydeps=dep.onlydeps)
746 # The caller has selected a specific package
747 # via self._minimize_packages().
749 existing_node = self._dynamic_config._slot_pkg_map[
750 dep.root].get(dep_pkg.slot_atom)
751 if existing_node is not dep_pkg:
# No package satisfied the atom (branch guard on a missing line).
755 if dep.priority.optional:
756 # This could be an unnecessary build-time dep
757 # pulled in by --with-bdeps=y.
759 if allow_unsatisfied:
760 self._dynamic_config._unsatisfied_deps.append(dep)
762 self._dynamic_config._unsatisfied_deps_for_display.append(
763 ((dep.root, dep.atom), {"myparent":dep.parent}))
765 # The parent node should not already be in
766 # runtime_pkg_mask, since that would trigger an
767 # infinite backtracking loop.
768 if self._dynamic_config._allow_backtracking:
769 if dep.parent in self._dynamic_config._runtime_pkg_mask:
770 if "--debug" in self._frozen_config.myopts:
772 "!!! backtracking loop detected: %s %s\n" % \
774 self._dynamic_config._runtime_pkg_mask[
775 dep.parent]), noiselevel=-1)
777 # Do not backtrack if only USE have to be changed in
778 # order to satisfy the dependency.
779 dep_pkg, existing_node = \
780 self._select_package(dep.root, dep.atom.without_use,
781 onlydeps=dep.onlydeps)
# Mask the parent and request a restart so the resolver can
# backtrack past this unsatisfiable dependency.
783 self._dynamic_config._runtime_pkg_mask.setdefault(
784 dep.parent, {})["missing dependency"] = \
785 set([(dep.parent, dep.root, dep.atom)])
786 self._dynamic_config._need_restart = True
787 if "--debug" in self._frozen_config.myopts:
791 msg.append("backtracking due to unsatisfied dep:")
792 msg.append(" parent: %s" % dep.parent)
793 msg.append(" priority: %s" % dep.priority)
794 msg.append(" root: %s" % dep.root)
795 msg.append(" atom: %s" % dep.atom)
797 writemsg_level("".join("%s\n" % l for l in msg),
798 noiselevel=-1, level=logging.DEBUG)
801 # In some cases, dep_check will return deps that shouldn't
802 # be processed any further, so they are identified and
803 # discarded here. Try to discard as few as possible since
804 # discarded dependencies reduce the amount of information
805 # available for optimization of merge order.
806 if dep.priority.satisfied and \
807 not dep_pkg.installed and \
808 not (existing_node or recurse):
810 if dep.root == self._frozen_config.target_root:
# try: for the StopIteration/InvalidDependString handlers
# below is on a missing line (orig 811).
812 myarg = next(self._iter_atoms_for_pkg(dep_pkg))
813 except StopIteration:
815 except portage.exception.InvalidDependString:
816 if not dep_pkg.installed:
817 # This shouldn't happen since the package
818 # should have been masked.
821 self._dynamic_config._ignored_deps.append(dep)
824 if not self._add_pkg(dep_pkg, dep):
# Add a resolved package node to the dependency digraph: detect slot
# collisions (possibly triggering backtracking), register parent atoms,
# inject the package into the per-root fake vdb, and prepare for recursing
# into its own dependencies. NOTE(review): line-sampled, and the method
# continues past the end of this excerpt (orig 1038+).
828 def _add_pkg(self, pkg, dep):
835 myparent = dep.parent
836 priority = dep.priority
# Fallback priority when the dep carries none (guard on a missing line).
839 priority = DepPriority()
841 Fills the digraph with nodes comprised of packages to merge.
842 mybigkey is the package spec of the package to merge.
843 myparent is the package depending on mybigkey ( or None )
844 addme = Should we add this package to the digraph or are we just looking at it's deps?
845 Think --onlydeps, we need to ignore packages in that case.
848 #IUSE-aware emerge -> USE DEP aware depgraph
849 #"no downgrade" emerge
851 # Ensure that the dependencies of the same package
852 # are never processed more than once.
853 previously_added = pkg in self._dynamic_config.digraph
855 # select the correct /var database that we'll be checking against
856 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
857 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# try: for the InvalidDependString handler below is on a missing line.
862 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
863 except portage.exception.InvalidDependString as e:
864 if not pkg.installed:
865 show_invalid_depstring_notice(
866 pkg, pkg.metadata["PROVIDE"], str(e))
871 if not pkg.installed and \
872 "empty" not in self._dynamic_config.myparams and \
873 vardbapi.match(pkg.slot_atom):
874 # Increase the priority of dependencies on packages that
875 # are being rebuilt. This optimizes merge order so that
876 # dependencies are rebuilt/updated as soon as possible,
877 # which is needed especially when emerge is called by
878 # revdep-rebuild since dependencies may be affected by ABI
879 # breakage that has rendered them useless. Don't adjust
880 # priority here when in "empty" mode since all packages
881 # are being merged in that case.
882 priority.rebuild = True
884 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
885 slot_collision = False
# A node already occupies this slot (guard on a missing line): check
# whether it is effectively the same package and can be reused.
887 existing_node_matches = pkg.cpv == existing_node.cpv
888 if existing_node_matches and \
889 pkg != existing_node and \
890 dep.atom is not None:
891 # Use package set for matching since it will match via
892 # PROVIDE when necessary, while match_from_list does not.
893 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
894 if not atom_set.findAtomForPackage(existing_node):
895 existing_node_matches = False
896 if existing_node_matches:
897 # The existing node can be reused.
899 for parent_atom in arg_atoms:
900 parent, atom = parent_atom
901 self._dynamic_config.digraph.add(existing_node, parent,
903 self._add_parent_atom(existing_node, parent_atom)
904 # If a direct circular dependency is not an unsatisfied
905 # buildtime dependency then drop it here since otherwise
906 # it can skew the merge order calculation in an unwanted
908 if existing_node != myparent or \
909 (priority.buildtime and not priority.satisfied):
910 self._dynamic_config.digraph.addnode(existing_node, myparent,
912 if dep.atom is not None and dep.parent is not None:
913 self._add_parent_atom(existing_node,
914 (dep.parent, dep.atom))
917 # A slot conflict has occurred.
918 # The existing node should not already be in
919 # runtime_pkg_mask, since that would trigger an
920 # infinite backtracking loop.
921 if self._dynamic_config._allow_backtracking and \
923 self._dynamic_config._runtime_pkg_mask:
924 if "--debug" in self._frozen_config.myopts:
926 "!!! backtracking loop detected: %s %s\n" % \
928 self._dynamic_config._runtime_pkg_mask[
929 existing_node]), noiselevel=-1)
930 elif self._dynamic_config._allow_backtracking and \
931 not self._accept_blocker_conflicts():
932 self._add_slot_conflict(pkg)
933 if dep.atom is not None and dep.parent is not None:
934 self._add_parent_atom(pkg, (dep.parent, dep.atom))
936 for parent_atom in arg_atoms:
937 parent, atom = parent_atom
938 self._add_parent_atom(pkg, parent_atom)
939 self._process_slot_conflicts()
942 self._dynamic_config._parent_atoms.get(pkg, set())
944 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
946 parent_atoms = conflict_atoms
947 if pkg >= existing_node:
948 # We only care about the parent atoms
949 # when they trigger a downgrade.
# Mask the existing node and restart so backtracking can try
# a different package for this slot.
952 self._dynamic_config._runtime_pkg_mask.setdefault(
953 existing_node, {})["slot conflict"] = parent_atoms
954 self._dynamic_config._need_restart = True
955 if "--debug" in self._frozen_config.myopts:
959 msg.append("backtracking due to slot conflict:")
960 msg.append(" package: %s" % existing_node)
961 msg.append(" slot: %s" % existing_node.slot_atom)
962 msg.append(" parents: %s" % \
963 [(str(parent), atom) \
964 for parent, atom in parent_atoms])
966 writemsg_level("".join("%s\n" % l for l in msg),
967 noiselevel=-1, level=logging.DEBUG)
970 # A slot collision has occurred. Sometimes this coincides
971 # with unresolvable blockers, so the slot collision will be
972 # shown later if there are no unresolvable blockers.
973 self._add_slot_conflict(pkg)
974 slot_collision = True
977 # Now add this node to the graph so that self.display()
978 # can show use flags and --tree portage.output. This node is
979 # only being partially added to the graph. It must not be
980 # allowed to interfere with the other nodes that have been
981 # added. Do not overwrite data for existing nodes in
982 # self._dynamic_config.mydbapi since that data will be used for blocker
984 # Even though the graph is now invalid, continue to process
985 # dependencies so that things like --fetchonly can still
986 # function despite collisions.
988 elif not previously_added:
989 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
990 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
991 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
993 if not pkg.installed:
994 # Allow this package to satisfy old-style virtuals in case it
995 # doesn't already. Any pre-existing providers will be preferred
998 pkgsettings.setinst(pkg.cpv, pkg.metadata)
999 # For consistency, also update the global virtuals.
1000 settings = self._frozen_config.roots[pkg.root].settings
1002 settings.setinst(pkg.cpv, pkg.metadata)
1004 except portage.exception.InvalidDependString as e:
1005 show_invalid_depstring_notice(
1006 pkg, pkg.metadata["PROVIDE"], str(e))
1011 self._dynamic_config._set_nodes.add(pkg)
1013 # Do this even when addme is False (--onlydeps) so that the
1014 # parent/child relationship is always known in case
1015 # self._show_slot_collision_notice() needs to be called later.
1016 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1017 if dep.atom is not None and dep.parent is not None:
1018 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1021 for parent_atom in arg_atoms:
1022 parent, atom = parent_atom
1023 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1024 self._add_parent_atom(pkg, parent_atom)
1026 """ This section determines whether we go deeper into dependencies or not.
1027 We want to go deeper on a few occasions:
1028 Installing package A, we need to make sure package A's deps are met.
1029 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1030 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1035 deep = self._dynamic_config.myparams.get("deep", 0)
1036 empty = "empty" in self._dynamic_config.myparams
1037 recurse = empty or deep is True or depth + 1 <= deep
# Method continues beyond this excerpt (orig 1038+).
1038 dep_stack = self._dynamic_config._dep_stack
1039 if "recurse" not in self._dynamic_config.myparams:
1041 elif pkg.installed and not recurse:
1042 dep_stack = self._dynamic_config._ignored_deps
1044 self._spinner_update()
1046 if not previously_added:
1047 dep_stack.append(pkg)
1050 def _add_parent_atom(self, pkg, parent_atom):
1051 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1052 if parent_atoms is None:
1053 parent_atoms = set()
1054 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1055 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
    # Register pkg as a participant in a slot conflict: remember the node
    # itself and group it, under a (slot_atom, root) key, with whatever
    # package already occupies that slot in the graph.
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
    # NOTE(review): the body of the branch above (presumably creating a new
    # empty set for this slot_key) appears elided in this copy — confirm
    # against the full source.
    # The package already graphed for this slot is part of the conflict too.
    slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
    self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Collect pkg's DEPEND/RDEPEND/PDEPEND strings, adjust them for the
    # active options (--buildpkgonly, --with-bdeps, --root-deps, removal
    # actions), then normalize each string and hand it to
    # _add_pkg_dep_string() for resolution.
    # NOTE(review): several frame lines (try/except, edepend init, the
    # "deps" list assignment) appear elided in this copy.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self._dynamic_config.myparams
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # Populates edepend for each key in depkeys (loop header elided here).
    edepend[k] = metadata[k]
    # --buildpkgonly without --deep/--empty: runtime deps are irrelevant,
    # only build-time deps need to be satisfied.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams and \
        "empty" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False
    if pkg.built and not removal_action:
        if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_optional = True
        # built packages do not have build time dependencies.
        edepend["DEPEND"] = ""
    if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""
    # --root-deps redirects build-time deps to the target root; the "rdeps"
    # variant discards DEPEND entirely.
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""
    # (root, depstring, priority) triples; build-time deps may target a
    # different root (bdeps_root) than runtime deps (myroot) — both names
    # are bound in elided lines above; TODO confirm against full source.
    (bdeps_root, edepend["DEPEND"],
        self._priority(buildtime=(not bdeps_optional),
        optional=bdeps_optional)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
    debug = "--debug" in self._frozen_config.myopts
    strict = mytype != "installed"
    # Relax global dep-check strictness while processing; restored at the end.
    portage.dep._dep_check_strict = False
    for dep_root, dep_string, dep_priority in deps:
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)
        # Flatten the dep string with USE conditionals applied.
        dep_string = portage.dep.paren_normalize(
            portage.dep.use_reduce(
                portage.dep.paren_reduce(dep_string),
                uselist=pkg.use.enabled))
        # Disjunctive (||/virtual) deps are queued for deferred evaluation;
        # only the non-disjunctive remainder is processed now.
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            show_invalid_depstring_notice(pkg, dep_string, str(e))
        dep_string = portage.dep.paren_enclose(dep_string)
        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
    except portage.exception.AmbiguousPackageName as e:
        # A dep atom matched multiple categories; report each candidate and
        # how to fix the offending package.
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
            mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
    # Restore global strictness regardless of outcome.
    portage.dep._dep_check_strict = True
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    # Resolve one normalized dependency string for pkg: select atoms,
    # add a Dependency edge for each, then graph any indirect virtual
    # packages that the atom selection pulled in.
    # NOTE(review): the signature continuation (allow_unsatisfied) and the
    # try/return frame lines appear elided in this copy.
    depth = pkg.depth + 1
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"
    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)
    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=pkg.use.enabled, parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString as e:
        show_invalid_depstring_notice(pkg, dep_string, str(e))
    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)
    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):
        mypriority = dep_priority.copy()
        # An installed match means this dep is already satisfied.
        if not atom.blocker and vardb.match(atom):
            mypriority.satisfied = True
        if not self._add_dep(Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root),
            allow_unsatisfied=allow_unsatisfied):
    # pkg's own entry has been handled; the remaining keys are virtuals.
    selected_atoms.pop(pkg)
    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_pkg, atoms in selected_atoms.items():
        writemsg_level("Candidates: %s: %s\n" % \
            (virt_pkg.cpv, [str(x) for x in atoms]),
            noiselevel=-1, level=logging.DEBUG)
        # Just assume depth + 1 here for now, though it's not entirely
        # accurate since multilple levels of indirect virtual deps may
        # have been traversed. The _add_pkg call will reset the depth to
        # 0 if this package happens to match an argument.
        if not self._add_pkg(virt_pkg,
            Dependency(atom=Atom('=' + virt_pkg.cpv),
            depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):
            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker and vardb.match(atom):
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_pkg.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
    writemsg_level("Exiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    """
    # NOTE(review): loop headers and early-yield lines appear elided in
    # this copy; the visible flow is: map atoms to selected packages,
    # then prune packages made redundant by other selections.
    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg
    # With fewer than two selections there is nothing to minimize.
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
    for cp, pkgs in cp_pkg_map.items():
        for atom in pkg_atom_map[pkg]:
        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
        for atom in pkg_atom_map[pkg1]:
            atom_pkg_graph.add(pkg1, atom)
            atom_set = InternalPackageSet(initial_atoms=(atom,))
            if atom_set.findAtomForPackage(pkg2):
                atom_pkg_graph.add(pkg2, atom)
        # A package is removable only if every atom pointing at it is also
        # satisfied by some other selected package.
        eliminate_pkg = True
        for atom in atom_pkg_graph.parent_nodes(pkg):
            if len(atom_pkg_graph.child_nodes(atom)) < 2:
                eliminate_pkg = False
        atom_pkg_graph.remove(pkg)
        # Yield < and <= atoms first, since those are more likely to
        # cause slot conflicts, and we want those atoms to be displayed
        # in the resulting slot conflict message (see bug #291142).
        for atom in cp_atoms:
            if atom.operator in ('<', '<='):
                less_than.append(atom)
            not_less_than.append(atom)
        for atom in chain(less_than, not_less_than):
            child_pkgs = atom_pkg_graph.child_nodes(atom)
            yield (atom, child_pkgs[0])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    """
    # NOTE(review): the index initialization, "||" detection and yield
    # statements appear elided in this copy.
    while i < len(dep_struct):
        # Nested lists are processed recursively; their non-disjunctive
        # members are re-yielded from here.
        if isinstance(x, list):
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
            # A "||" token and its following choice-list are queued as one
            # deferred disjunction.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ x, dep_struct[ i + 1 ] ] )
            x = portage.dep.Atom(x)
        except portage.exception.InvalidAtom:
            # Tolerate invalid atoms from already-installed packages;
            # anything else is a hard error.
            if not pkg.installed:
                raise portage.exception.InvalidDependString(
                    "invalid atom: '%s'" % x)
        # Note: Eventually this will check for PROPERTIES=virtual
        # or whatever other metadata gets implemented for this
        if x.cp.startswith('virtual/'):
            self._queue_disjunction( pkg, dep_root,
                dep_priority, [ str(x) ] )
1396 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1397 self._dynamic_config._dep_disjunctive_stack.append(
1398 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    dep_string = portage.dep.paren_enclose(dep_struct)
    # NOTE(review): the failure-return body of this branch appears elided
    # in this copy.
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
    # Build a dependency priority object appropriate for the current
    # operation: UnmergeDepPriority during removal actions, DepPriority
    # otherwise. kwargs are forwarded to the chosen constructor.
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    # NOTE(review): an "else:" line appears elided here — this assignment
    # is the non-removal default; confirm against the full source.
    priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand a category-less atom into candidate category-qualified atoms.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # Probe with a placeholder "null" category just to split out the
    # package-name part of the atom.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    # Collect every category, across all configured dbs, that actually
    # carries a package with this name. NOTE(review): the accumulation
    # into "categories" and the final return appear elided in this copy.
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat)))
def _have_new_virt(self, root, atom_cp):
    # Presumably answers whether any configured db for this root carries
    # atom_cp as a new-style virtual (has real package entries); the
    # result-flag assignment and return appear elided in this copy.
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Yield the command-line/set arguments (with their atoms) that match
    # pkg. NOTE(review): yield statements and several branch bodies appear
    # elided in this copy.
    # TODO: add multiple $ROOT support
    if pkg.root != self._frozen_config.target_root:
    atom_arg_map = self._dynamic_config._atom_arg_map
    root_config = self._frozen_config.roots[pkg.root]
    for atom in self._dynamic_config._set_atoms.iterAtomsForPackage(pkg):
        # Skip old-style virtual matches that are shadowed by a new-style
        # virtual of the same cp.
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        # Look for a strictly higher version in a different slot, which
        # disqualifies this atom from matching pkg.
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): many frame lines (the argument loop header, several
    # branch bodies and returns) appear elided in this copy.
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self._frozen_config.target_root
    dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
    vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    # --- classify each argument by file extension / shape ---
    ext = os.path.splitext(x)[1]
    # .tbz2 arguments: locate the binary package under PKGDIR.
    if not os.path.exists(x):
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    print("\n\n!!! Binary package '"+str(x)+"' does not exist.")
    print("!!! Please ensure the tbz2 exists as specified.\n")
    return 0, myfavorites
    # Derive the cpv key from the tbz2's embedded CATEGORY and filename.
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
        print(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n"))
        return 0, myfavorites
    pkg = self._pkg(mykey, "binary", root_config,
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    # .ebuild arguments: validate the path lies in a proper tree layout.
    elif ext==".ebuild":
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        # strip the ".ebuild" suffix (7 chars) to get the version part
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n"))
            return 0, myfavorites
        # Warn (with a countdown) when emerging a masked ebuild directly.
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use"))
            print(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man"))
            print(colorize("BAD", "*** page for details."))
            countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        pkg = self._pkg(mykey, "ebuild", root_config,
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    # Absolute paths: resolve the owning package(s) later in one batch.
    elif x.startswith(os.path.sep):
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    # Named sets ("system"/"world" or @set syntax).
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        if s in self._dynamic_config._sets:
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._dynamic_config._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
    # Plain dependency atoms.
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=Atom(x),
        root_config=root_config))
    # Category-less atom: expand and try to disambiguate.
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        if vardb.cp_list(atom.cp):
            installed_cp_set.add(atom.cp)
    # Prefer the single non-virtual installed cp when several are installed.
    if len(installed_cp_set) > 1:
        non_virtual_cps = set()
        for atom_cp in installed_cp_set:
            if not atom_cp.startswith("virtual/"):
                non_virtual_cps.add(atom_cp)
        if len(non_virtual_cps) == 1:
            installed_cp_set = non_virtual_cps
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        installed_cp = next(iter(installed_cp_set))
        expanded_atoms = [atom for atom in expanded_atoms \
            if atom.cp == installed_cp]
    # If a non-virtual package and one or more virtual packages
    # are in expanded_atoms, use the non-virtual package.
    if len(expanded_atoms) > 1:
        number_of_virtuals = 0
        for expanded_atom in expanded_atoms:
            if expanded_atom.cp.startswith("virtual/"):
                number_of_virtuals += 1
            candidate = expanded_atom
        if len(expanded_atoms) - number_of_virtuals == 1:
            expanded_atoms = [ candidate ]
    if len(expanded_atoms) > 1:
        # Still ambiguous: report and bail out.
        ambiguous_package_name(x, expanded_atoms, root_config,
            self._frozen_config.spinner, self._frozen_config.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    # No match at all: if an old-style virtual with this name exists,
    # hand the choice of provider to the depgraph.
    null_atom = Atom(insert_category_into_atom(x, "null"))
    cat, atom_pn = portage.catsplit(null_atom.cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = Atom(null_atom.replace('null/', 'virtual/', 1))
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))
    # --- resolve queued path arguments to their owning packages ---
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot)-1:])
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:
    portage.writemsg(("\n\n!!! '%s' is not claimed " + \
        "by any package.\n") % lookup_owners[0], noiselevel=-1)
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = Atom(portage.cpv_getkey(cpv))
    atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))
    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))
        self._set_args(greedy_args)
        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args
    self._set_args(args)
    # Favorites are deduplicated atoms/set names recorded for the world file.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]
    return self._resolve(myfavorites)
def _resolve(self, myfavorites):
    """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
    call self._creategraph to process theier deps and return
    """
    # NOTE(review): the try frame, several branch bodies and returns
    # appear elided in this copy.
    debug = "--debug" in self._frozen_config.myopts
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    myroot = self._frozen_config.target_root
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    pprovideddict = pkgsettings.pprovideddict
    virtuals = pkgsettings.getvirtuals()
    for arg in self._dynamic_config._initial_arg_list:
        for atom in arg.set:
            self._spinner_update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            # Check package.provided before attempting any selection.
            pprovided = pprovideddict.get(atom.cp)
            if pprovided and portage.match_from_list(atom, pprovided):
                # A provided package has been specified on the command line.
                self._dynamic_config._pprovided_args.append((arg, atom))
            if isinstance(arg, PackageArg):
                if not self._add_pkg(arg.package, dep) or \
                    not self._create_graph():
                    if not self._dynamic_config._need_restart:
                        sys.stderr.write(("\n\n!!! Problem " + \
                            "resolving dependencies for %s\n") % \
                    return 0, myfavorites
            portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
                (arg, atom), noiselevel=-1)
            pkg, existing_node = self._select_package(
                myroot, atom, onlydeps=onlydeps)
            # Nothing selected: check whether an old-style virtual
            # expansion of the atom is package.provided instead.
            pprovided_match = False
            for virt_choice in virtuals.get(atom.cp, []):
                expanded_atom = portage.dep.Atom(
                    atom.replace(atom.cp,
                    portage.dep_getkey(virt_choice), 1))
                pprovided = pprovideddict.get(expanded_atom.cp)
                portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._dynamic_config._pprovided_args.append((arg, atom))
                pprovided_match = True
            # Unsatisfied args only abort the resolve when they do not
            # come from the selected/system/world sets.
            if not (isinstance(arg, SetArg) and \
                arg.name in ("selected", "system", "world")):
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {}))
                return 0, myfavorites
            self._dynamic_config._missing_args.append((arg, atom))
            if atom.cp != pkg.cp:
                # For old-style virtuals, we need to repeat the
                # package.provided check against the selected package.
                expanded_atom = atom.replace(atom.cp, pkg.cp)
                pprovided = pprovideddict.get(pkg.cp)
                portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._dynamic_config._pprovided_args.append((arg, atom))
            if pkg.installed and "selective" not in self._dynamic_config.myparams:
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {}))
                # Previous behavior was to bail out in this case, but
                # since the dep is satisfied by the installed package,
                # it's more friendly to continue building the graph
                # and just show a warning message. Therefore, only bail
                # out here if the atom is not from either the system or
                if not (isinstance(arg, SetArg) and \
                    arg.name in ("selected", "system", "world")):
                    return 0, myfavorites
            # Add the selected package to the graph as soon as possible
            # so that later dep_check() calls can use it as feedback
            # for making more consistent atom selections.
            if not self._add_pkg(pkg, dep):
                if self._dynamic_config._need_restart:
                elif isinstance(arg, SetArg):
                    sys.stderr.write(("\n\n!!! Problem resolving " + \
                        "dependencies for %s from %s\n") % \
                    sys.stderr.write(("\n\n!!! Problem resolving " + \
                        "dependencies for %s\n") % atom)
                return 0, myfavorites
    except portage.exception.MissingSignature as e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature as e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit as e:
        raise # Needed else can't exit
    except Exception as e:
        print("\n\n!!! Problem in '%s' dependencies." % atom, file=sys.stderr)
        print("!!!", str(e), getattr(e, "__module__", None), file=sys.stderr)
    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites
    # With --usepkgonly, verify a binary exists for everything scheduled
    # to merge; report any package that would need an ebuild build.
    if "--usepkgonly" in self._frozen_config.myopts:
        for xs in self._dynamic_config.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print("Missing binary for:",xs[2])
    except self._unknown_internal_error:
        return False, myfavorites
    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
def _set_args(self, args):
    """
    Create the "args" package set from atoms and packages given as
    arguments. This method can be called multiple times if necessary.
    The package selection cache is automatically invalidated, since
    arguments influence package selections.
    """
    # NOTE(review): the loop headers over args and some branch bodies
    # appear elided in this copy.
    args_set = self._dynamic_config._sets["args"]
    if not isinstance(arg, (AtomArg, PackageArg)):
    if atom in args_set:
    # Rebuild the combined atom set from every configured package set.
    self._dynamic_config._set_atoms.clear()
    self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
    # Rebuild the (atom, root) -> [args] reverse map.
    atom_arg_map = self._dynamic_config._atom_arg_map
    atom_arg_map.clear()
    for atom in arg.set:
        atom_key = (atom, arg.root_config.root)
        refs = atom_arg_map.get(atom_key)
        atom_arg_map[atom_key] = refs
    # Invalidate the package selection cache, since
    # arguments influence package selections.
    self._dynamic_config._highest_pkg_cache.clear()
    for trees in self._dynamic_config._filtered_trees.values():
        trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
    """
    Return a list of slot atoms corresponding to installed slots that
    differ from the slot of the highest visible match. When
    blocker_lookahead is True, slot atoms that would trigger a blocker
    conflict are automatically discarded, potentially allowing automatic
    uninstallation of older slots when appropriate.
    """
    # NOTE(review): a few frame lines (early return, slots init, the loop
    # over slots, try frames) appear elided in this copy.
    highest_pkg, in_graph = self._select_package(root_config.root, atom)
    if highest_pkg is None:
    vardb = root_config.trees["vartree"].dbapi
    # Collect the SLOTs of installed versions sharing highest_pkg's cp.
    for cpv in vardb.match(atom):
        # don't mix new virtuals with old virtuals
        if portage.cpv_getkey(cpv) == highest_pkg.cp:
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
    slots.add(highest_pkg.metadata["SLOT"])
    # The highest match's own slot is not "greedy" — drop it.
    slots.remove(highest_pkg.metadata["SLOT"])
    # For each remaining slot, keep the selectable package only if it is
    # older than the highest visible match.
    slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
    pkg, in_graph = self._select_package(root_config.root, slot_atom)
    if pkg is not None and \
        pkg.cp == highest_pkg.cp and pkg < highest_pkg:
        greedy_pkgs.append(pkg)
    if not blocker_lookahead:
        return [pkg.slot_atom for pkg in greedy_pkgs]
    # Blocker lookahead: build each candidate's blocker-atom set from its
    # dependency strings so conflicting slots can be discarded.
    blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
    for pkg in greedy_pkgs + [highest_pkg]:
        dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
        selected_atoms = self._select_atoms(
            pkg.root, dep_str, pkg.use.enabled,
            parent=pkg, strict=True)
        except portage.exception.InvalidDependString:
        for atoms in selected_atoms.values():
            blocker_atoms.extend(x for x in atoms if x.blocker)
        blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
    if highest_pkg not in blockers:
    # filter packages with invalid deps
    greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
    # filter packages that conflict with highest_pkg
    greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
        (blockers[highest_pkg].findAtomForPackage(pkg) or \
        blockers[pkg].findAtomForPackage(highest_pkg))]
    # If two packages conflict, discard the lower version.
    discard_pkgs = set()
    greedy_pkgs.sort(reverse=True)
    for i in range(len(greedy_pkgs) - 1):
        pkg1 = greedy_pkgs[i]
        if pkg1 in discard_pkgs:
        for j in range(i + 1, len(greedy_pkgs)):
            pkg2 = greedy_pkgs[j]
            if pkg2 in discard_pkgs:
            if blockers[pkg1].findAtomForPackage(pkg2) or \
                blockers[pkg2].findAtomForPackage(pkg1):
                discard_pkgs.add(pkg2)
    return [pkg.slot_atom for pkg in greedy_pkgs \
        if pkg not in discard_pkgs]
2020 def _select_atoms_from_graph(self, *pargs, **kwargs):
2022 Prefer atoms matching packages that have already been
2023 added to the graph or those that are installed and have
2024 not been scheduled for replacement.
# Thin wrapper: force the graph-backed trees, then delegate. Any
# caller-supplied "trees" kwarg is intentionally overridden.
2026 kwargs["trees"] = self._dynamic_config._graph_trees
2027 return self._select_atoms_highest_available(*pargs, **kwargs)
2029 def _select_atoms_highest_available(self, root, depstring,
2030 myuse=None, parent=None, strict=True, trees=None, priority=None):
2031 """This will raise InvalidDependString if necessary. If trees is
2032 None then self._dynamic_config._filtered_trees is used."""
# NOTE(review): sampled view — lines are missing between the numbered
# statements below (e.g. the try/finally around dep_check).
2033 pkgsettings = self._frozen_config.pkgsettings[root]
2035 trees = self._dynamic_config._filtered_trees
2036 atom_graph = digraph()
# trees[root] is temporarily mutated so dep_check can see the parent,
# the atom graph collector, and the priority; the pop() calls below
# (presumably in a finally block — elided here) undo the mutation.
2039 if parent is not None:
2040 trees[root]["parent"] = parent
2041 trees[root]["atom_graph"] = atom_graph
2042 if priority is not None:
2043 trees[root]["priority"] = priority
# Global strict-mode flag is toggled around the dep_check call and
# restored afterwards (module-level state, not thread-safe).
2045 portage.dep._dep_check_strict = False
2046 mycheck = portage.dep_check(depstring, None,
2047 pkgsettings, myuse=myuse,
2048 myroot=root, trees=trees)
2050 if parent is not None:
2051 trees[root].pop("parent")
2052 trees[root].pop("atom_graph")
2053 if priority is not None:
2054 trees[root].pop("priority")
2055 portage.dep._dep_check_strict = True
# On dep_check failure mycheck[1] carries the error message.
2057 raise portage.exception.InvalidDependString(mycheck[1])
2059 selected_atoms = mycheck[1]
# With a parent, rebuild the selection as a mapping keyed by package,
# using the atom graph populated during dep_check.
2061 chosen_atoms = frozenset(mycheck[1])
2062 selected_atoms = {parent : []}
2063 for node in atom_graph:
2064 if isinstance(node, Atom):
2069 pkg, virt_atom = node
2070 if virt_atom not in chosen_atoms:
2072 if not portage.match_from_list(virt_atom, [pkg]):
2073 # Typically this means that the atom
2074 # specifies USE deps that are unsatisfied
2075 # by the selected package. The caller will
2076 # record this as an unsatisfied dependency
2080 selected_atoms[pkg] = [atom for atom in \
2081 atom_graph.child_nodes(node) if atom in chosen_atoms]
2083 return selected_atoms
2085 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2086 check_backtrack=False):
# NOTE(review): sampled view — lines are missing between the numbered
# statements below. Comments added only.
2088 When check_backtrack=True, no output is produced and
2089 the method either returns or raises _backtrack_mask if
2090 a matching package has been masked by backtracking.
2092 backtrack_mask = False
2093 atom_set = InternalPackageSet(initial_atoms=(atom,))
2094 xinfo = '"%s"' % atom
2097 # Discard null/ from failed cpv_expand category expansion.
2098 xinfo = xinfo.replace("null/", "")
2099 masked_packages = []
2101 masked_pkg_instances = set()
2102 missing_licenses = []
2103 have_eapi_mask = False
2104 pkgsettings = self._frozen_config.pkgsettings[root]
2105 implicit_iuse = pkgsettings._get_implicit_iuse()
2106 root_config = self._frozen_config.roots[root]
2107 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2108 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Scan every configured db for candidates that match the atom ignoring
# USE deps, and classify why each candidate was rejected.
2109 for db, pkg_type, built, installed, db_keys in dbs:
2113 if hasattr(db, "xmatch"):
2114 cpv_list = db.xmatch("match-all", atom.without_use)
2116 cpv_list = db.match(atom.without_use)
2119 for cpv in cpv_list:
2120 metadata, mreasons = get_mask_info(root_config, cpv,
2121 pkgsettings, db, pkg_type, built, installed, db_keys)
2122 if metadata is not None:
2123 pkg = self._pkg(cpv, pkg_type, root_config,
2124 installed=installed)
2125 # pkg.metadata contains calculated USE for ebuilds,
2126 # required later for getMissingLicenses.
2127 metadata = pkg.metadata
2128 if pkg.cp != atom.cp:
2129 # A cpv can be returned from dbapi.match() as an
2130 # old-style virtual match even in cases when the
2131 # package does not actually PROVIDE the virtual.
2132 # Filter out any such false matches here.
2133 if not atom_set.findAtomForPackage(pkg):
2135 if pkg in self._dynamic_config._runtime_pkg_mask:
2136 backtrack_reasons = \
2137 self._dynamic_config._runtime_pkg_mask[pkg]
2138 mreasons.append('backtracking: %s' % \
2139 ', '.join(sorted(backtrack_reasons)))
2140 backtrack_mask = True
2142 masked_pkg_instances.add(pkg)
2144 missing_use.append(pkg)
2147 masked_packages.append(
2148 (root_config, pkgsettings, cpv, metadata, mreasons))
# check_backtrack mode: signal the caller instead of printing.
2152 raise self._backtrack_mask()
# Work out, for each USE-rejected candidate, whether the atom requires
# flags missing from IUSE or merely flag changes.
2156 missing_use_reasons = []
2157 missing_iuse_reasons = []
2158 for pkg in missing_use:
2159 use = pkg.use.enabled
2160 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2161 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2163 for x in atom.use.required:
2164 if iuse_re.match(x) is None:
2165 missing_iuse.append(x)
2168 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2169 missing_iuse_reasons.append((pkg, mreasons))
2171 need_enable = sorted(atom.use.enabled.difference(use))
2172 need_disable = sorted(atom.use.disabled.intersection(use))
2173 if need_enable or need_disable:
2175 changes.extend(colorize("red", "+" + x) \
2176 for x in need_enable)
2177 changes.extend(colorize("blue", "-" + x) \
2178 for x in need_disable)
2179 mreasons.append("Change USE: %s" % " ".join(changes))
2180 missing_use_reasons.append((pkg, mreasons))
# Only unmasked instances are worth suggesting USE changes for.
2182 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2183 in missing_use_reasons if pkg not in masked_pkg_instances]
2185 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2186 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2188 show_missing_use = False
2189 if unmasked_use_reasons:
2190 # Only show the latest version.
2191 show_missing_use = unmasked_use_reasons[:1]
2192 elif unmasked_iuse_reasons:
2193 if missing_use_reasons:
2194 # All packages with required IUSE are masked,
2195 # so display a normal masking message.
2198 show_missing_use = unmasked_iuse_reasons
# Output section: one of three messages depending on what was found.
2202 if show_missing_use:
2203 print("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".")
2204 print("!!! One of the following packages is required to complete your request:")
2205 for pkg, mreasons in show_missing_use:
2206 print("- "+pkg.cpv+" ("+", ".join(mreasons)+")")
2208 elif masked_packages:
2210 colorize("BAD", "All ebuilds that could satisfy ") + \
2211 colorize("INFORM", xinfo) + \
2212 colorize("BAD", " have been masked."))
2213 print("!!! One of the following masked packages is required to complete your request:")
2214 have_eapi_mask = show_masked_packages(masked_packages)
2217 msg = ("The current version of portage supports " + \
2218 "EAPI '%s'. You must upgrade to a newer version" + \
2219 " of portage before EAPI masked packages can" + \
2220 " be installed.") % portage.const.EAPI
2221 from textwrap import wrap
2222 for line in wrap(msg, 75):
2227 print("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".")
2229 # Show parent nodes and the argument that pulled them in.
2230 traversed_nodes = set()
2233 while node is not None:
2234 traversed_nodes.add(node)
2235 msg.append('(dependency required by "%s" [%s])' % \
2236 (colorize('INFORM', str(node.cpv)), node.type_name))
2238 if node not in self._dynamic_config.digraph:
2239 # The parent is not in the graph due to backtracking.
2242 # When traversing to parents, prefer arguments over packages
2243 # since arguments are root nodes. Never traverse the same
2244 # package twice, in order to prevent an infinite loop.
2245 selected_parent = None
2246 for parent in self._dynamic_config.digraph.parent_nodes(node):
2247 if isinstance(parent, DependencyArg):
2248 msg.append('(dependency required by "%s" [argument])' % \
2249 (colorize('INFORM', str(parent))))
2250 selected_parent = None
2252 if parent not in traversed_nodes:
2253 selected_parent = parent
2254 node = selected_parent
2264 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2266 Iterate over Package instances of pkg_type matching the given atom.
2267 This does not check visibility and it also does not match USE for
2268 unbuilt ebuilds since USE are lazily calculated after visibility
2269 checks (to avoid the expense when possible).
# NOTE(review): sampled view — lines are missing between the numbered
# statements below. Comments added only.
2272 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
2274 if hasattr(db, "xmatch"):
2275 cpv_list = db.xmatch("match-all", atom)
2277 cpv_list = db.match(atom)
2279 # USE=multislot can make an installed package appear as if
2280 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2281 # won't do any good as long as USE=multislot is enabled since
2282 # the newly built package still won't have the expected slot.
2283 # Therefore, assume that such SLOT dependencies are already
2284 # satisfied rather than forcing a rebuild.
2285 installed = pkg_type == 'installed'
2286 if installed and not cpv_list and atom.slot:
2287 for cpv in db.match(atom.cp):
# Is the wanted slot provided by any other db? If so, the normal
# resolution path can handle it and the workaround is skipped.
2288 slot_available = False
2289 for other_db, other_type, other_built, \
2290 other_installed, other_keys in \
2291 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2294 other_db.aux_get(cpv, ["SLOT"])[0]:
2295 slot_available = True
2299 if not slot_available:
2301 inst_pkg = self._pkg(cpv, "installed",
2302 root_config, installed=installed)
2303 # Remove the slot from the atom and verify that
2304 # the package matches the resulting atom.
2305 atom_without_slot = portage.dep.remove_slot(atom)
2307 atom_without_slot += str(atom.use)
2308 atom_without_slot = portage.dep.Atom(atom_without_slot)
2309 if portage.match_from_list(
2310 atom_without_slot, [inst_pkg]):
2311 cpv_list = [inst_pkg.cpv]
2318 for cpv in cpv_list:
2320 pkg = self._pkg(cpv, pkg_type, root_config,
2321 installed=installed, onlydeps=onlydeps)
# PackageNotFound here is presumably skipped so iteration can
# continue with the remaining cpvs (handler body elided).
2322 except portage.exception.PackageNotFound:
2325 if pkg.cp != atom.cp:
2326 # A cpv can be returned from dbapi.match() as an
2327 # old-style virtual match even in cases when the
2328 # package does not actually PROVIDE the virtual.
2329 # Filter out any such false matches here.
2330 if not InternalPackageSet(initial_atoms=(atom,)
2331 ).findAtomForPackage(pkg):
2335 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Caching wrapper around _select_pkg_highest_available_imp, keyed on
# (root, atom, onlydeps). NOTE(review): sampled view — lines are
# missing between the numbered statements below.
2336 cache_key = (root, atom, onlydeps)
2337 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
2340 if pkg and not existing:
2341 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2342 if existing and existing == pkg:
2343 # Update the cache to reflect that the
2344 # package has been added to the graph.
2346 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
2348 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2349 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Track visible packages (installed ones only count when they have no
# missing KEYWORDS) for later use elsewhere in the resolver.
2352 settings = pkg.root_config.settings
2353 if visible(settings, pkg) and not (pkg.installed and \
2354 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2355 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
2358 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan the configured dbs (binary,
# installed, ebuild, ...) for the best match of `atom`, honoring
# --usepkgonly/--noreplace/--newuse/--reinstall and the "selective"/
# "empty" resolver params. Returns a (pkg, existing_node) pair.
# NOTE(review): sampled view — many lines are elided between the
# numbered statements below; comments added only.
2359 root_config = self._frozen_config.roots[root]
2360 pkgsettings = self._frozen_config.pkgsettings[root]
2361 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2362 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2363 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2364 # List of acceptable packages, ordered by type preference.
2365 matched_packages = []
2366 highest_version = None
2367 if not isinstance(atom, portage.dep.Atom):
2368 atom = portage.dep.Atom(atom)
2370 atom_set = InternalPackageSet(initial_atoms=(atom,))
2371 existing_node = None
# Snapshot the relevant options/params once before the loops.
2373 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2374 empty = "empty" in self._dynamic_config.myparams
2375 selective = "selective" in self._dynamic_config.myparams
2377 noreplace = "--noreplace" in self._frozen_config.myopts
2378 avoid_update = "--update" not in self._frozen_config.myopts
2379 use_ebuild_visibility = self._frozen_config.myopts.get(
2380 '--use-ebuild-visibility', 'n') != 'n'
2381 # Behavior of the "selective" parameter depends on
2382 # whether or not a package matches an argument atom.
2383 # If an installed package provides an old-style
2384 # virtual that is no longer provided by an available
2385 # package, the installed package may match an argument
2386 # atom even though none of the available packages do.
2387 # Therefore, "selective" logic does not consider
2388 # whether or not an installed package matches an
2389 # argument atom. It only considers whether or not
2390 # available packages match argument atoms, which is
2391 # represented by the found_available_arg flag.
2392 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back
# to a fresh selection.
2393 for find_existing_node in True, False:
2396 for db, pkg_type, built, installed, db_keys in dbs:
2399 if installed and not find_existing_node:
2400 want_reinstall = reinstall or empty or \
2401 (found_available_arg and not selective)
2402 if want_reinstall and matched_packages:
2405 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom,
2407 if pkg in self._dynamic_config._runtime_pkg_mask:
2408 # The package has been masked by the backtracking logic
2411 # Make --noreplace take precedence over --newuse.
2412 if not pkg.installed and noreplace and \
2413 cpv in vardb.match(atom):
2414 # If the installed version is masked, it may
2415 # be necessary to look at lower versions,
2416 # in case there is a visible downgrade.
2418 reinstall_for_flags = None
2420 if not pkg.installed or \
2421 (matched_packages and not avoid_update):
2422 # Only enforce visibility on installed packages
2423 # if there is at least one other visible package
2424 # available. By filtering installed masked packages
2425 # here, packages that have been masked since they
2426 # were installed can be automatically downgraded
2427 # to an unmasked version.
2429 if not visible(pkgsettings, pkg):
2431 except portage.exception.InvalidDependString:
2435 # Enable upgrade or downgrade to a version
2436 # with visible KEYWORDS when the installed
2437 # version is masked by KEYWORDS, but never
2438 # reinstall the same exact version only due
2439 # to a KEYWORDS mask. See bug #252167.
2440 if matched_packages:
2442 different_version = None
2443 for avail_pkg in matched_packages:
2444 if not portage.dep.cpvequal(
2445 pkg.cpv, avail_pkg.cpv):
2446 different_version = avail_pkg
2448 if different_version is not None:
2449 # If the ebuild no longer exists or its
2450 # keywords have been dropped, reject built
2451 # instances (installed or binary).
2452 # If --usepkgonly is enabled, assume that
2453 # the ebuild status should be ignored.
2454 if not use_ebuild_visibility and usepkgonly:
2456 pkgsettings._getMissingKeywords(
2457 pkg.cpv, pkg.metadata):
2462 pkg.cpv, "ebuild", root_config)
2463 except portage.exception.PackageNotFound:
2466 if not visible(pkgsettings, pkg_eb):
2469 # Calculation of USE for unbuilt ebuilds is relatively
2470 # expensive, so it is only performed lazily, after the
2471 # above visibility checks are complete.
2474 if root == self._frozen_config.target_root:
2476 myarg = next(self._iter_atoms_for_pkg(pkg))
2477 except StopIteration:
2479 except portage.exception.InvalidDependString:
2481 # masked by corruption
2483 if not installed and myarg:
2484 found_available_arg = True
# Enforce the atom's USE deps for unbuilt packages (built packages
# were matched with USE earlier).
2486 if atom.use and not pkg.built:
2487 use = pkg.use.enabled
2488 if atom.use.enabled.difference(use):
2490 if atom.use.disabled.intersection(use):
2492 if pkg.cp == atom_cp:
2493 if highest_version is None:
2494 highest_version = pkg
2495 elif pkg > highest_version:
2496 highest_version = pkg
2497 # At this point, we've found the highest visible
2498 # match from the current repo. Any lower versions
2499 # from this repo are ignored, so the loop
2500 # will always end with a break statement below
2502 if find_existing_node:
2503 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2506 # Use PackageSet.findAtomForPackage()
2507 # for PROVIDE support.
2508 if atom_set.findAtomForPackage(e_pkg):
2509 if highest_version and \
2510 e_pkg.cp == atom_cp and \
2511 e_pkg < highest_version and \
2512 e_pkg.slot_atom != highest_version.slot_atom:
2513 # There is a higher version available in a
2514 # different slot, so this existing node is
2518 matched_packages.append(e_pkg)
2519 existing_node = e_pkg
2521 # Compare built package to current config and
2522 # reject the built package if necessary.
2523 if built and not installed and \
2524 ("--newuse" in self._frozen_config.myopts or \
2525 "--reinstall" in self._frozen_config.myopts or \
2526 "--binpkg-respect-use" in self._frozen_config.myopts):
2527 iuses = pkg.iuse.all
2528 old_use = pkg.use.enabled
2530 pkgsettings.setcpv(myeb)
2532 pkgsettings.setcpv(pkg)
2533 now_use = pkgsettings["PORTAGE_USE"].split()
2534 forced_flags = set()
2535 forced_flags.update(pkgsettings.useforce)
2536 forced_flags.update(pkgsettings.usemask)
2538 if myeb and not usepkgonly:
2539 cur_iuse = myeb.iuse.all
2540 if self._reinstall_for_flags(forced_flags,
2544 # Compare current config to installed package
2545 # and do not reinstall if possible.
2546 if not installed and \
2547 ("--newuse" in self._frozen_config.myopts or \
2548 "--reinstall" in self._frozen_config.myopts) and \
2549 cpv in vardb.match(atom):
2550 pkgsettings.setcpv(pkg)
2551 forced_flags = set()
2552 forced_flags.update(pkgsettings.useforce)
2553 forced_flags.update(pkgsettings.usemask)
2554 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
2555 old_iuse = set(filter_iuse_defaults(
2556 vardb.aux_get(cpv, ["IUSE"])[0].split()))
2557 cur_use = pkg.use.enabled
2558 cur_iuse = pkg.iuse.all
2559 reinstall_for_flags = \
2560 self._reinstall_for_flags(
2561 forced_flags, old_use, old_iuse,
2563 if reinstall_for_flags:
2567 matched_packages.append(pkg)
2568 if reinstall_for_flags:
2569 self._dynamic_config._reinstall_nodes[pkg] = \
2573 if not matched_packages:
2576 if "--debug" in self._frozen_config.myopts:
2577 for pkg in matched_packages:
2578 portage.writemsg("%s %s\n" % \
2579 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
2581 # Filter out any old-style virtual matches if they are
2582 # mixed with new-style virtual matches.
2584 if len(matched_packages) > 1 and \
2585 "virtual" == portage.catsplit(cp)[0]:
2586 for pkg in matched_packages:
2589 # Got a new-style virtual, so filter
2590 # out any old-style virtuals.
2591 matched_packages = [pkg for pkg in matched_packages \
2595 if len(matched_packages) > 1:
2597 if existing_node is not None:
2598 return existing_node, existing_node
2599 for pkg in matched_packages:
2601 return pkg, existing_node
# Keep only packages equal to the best cpv among the matches.
2603 bestmatch = portage.best(
2604 [pkg.cpv for pkg in matched_packages])
2605 matched_packages = [pkg for pkg in matched_packages \
2606 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
2608 # ordered by type preference ("ebuild" type is the last resort)
2609 return matched_packages[-1], existing_node
2611 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
2613 Select packages that have already been added to the graph or
2614 those that are installed and have not been scheduled for
# Returns (pkg, in_graph) where in_graph is the graph node occupying
# pkg's slot, if any. `onlydeps` is accepted for signature parity but
# is not used in the visible body.
2617 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
2618 matches = graph_db.match_pkgs(atom)
2621 pkg = matches[-1] # highest match
2622 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2623 return pkg, in_graph
2625 def _complete_graph(self, required_sets=None):
# NOTE(review): sampled view — lines are missing between the numbered
# statements below. Comments added only.
2627 Add any deep dependencies of required sets (args, system, world) that
2628 have not been pulled into the graph yet. This ensures that the graph
2629 is consistent such that initially satisfied deep dependencies are not
2630 broken in the new graph. Initially unsatisfied dependencies are
2631 irrelevant since we only want to avoid breaking dependencies that are
2634 Since this method can consume enough time to disturb users, it is
2635 currently only enabled by the --complete-graph option.
2637 @param required_sets: contains required sets (currently only used
2638 for depclean and prune removal operations)
2639 @type required_sets: dict
2641 if "--buildpkgonly" in self._frozen_config.myopts or \
2642 "recurse" not in self._dynamic_config.myparams:
2645 if "complete" not in self._dynamic_config.myparams:
2646 # Skip this to avoid consuming enough time to disturb users.
2651 # Put the depgraph into a mode that causes it to only
2652 # select packages that have already been added to the
2653 # graph or those that are installed and have not been
2654 # scheduled for replacement. Also, toggle the "deep"
2655 # parameter so that all dependencies are traversed and
# Swap the selection strategies to the graph-backed variants for the
# remainder of this pass.
2657 self._select_atoms = self._select_atoms_from_graph
2658 self._select_package = self._select_pkg_from_graph
2659 already_deep = self._dynamic_config.myparams.get("deep") is True
2660 if not already_deep:
2661 self._dynamic_config.myparams["deep"] = True
2663 for root in self._frozen_config.roots:
2664 if root != self._frozen_config.target_root and \
2665 "remove" in self._dynamic_config.myparams:
2666 # Only pull in deps for the relevant root.
2668 if required_sets is None or root not in required_sets:
2669 required_set_names = self._frozen_config._required_set_names.copy()
2671 required_set_names = set(required_sets[root])
2672 if root == self._frozen_config.target_root and \
2673 (already_deep or "empty" in self._dynamic_config.myparams):
2674 required_set_names.difference_update(self._dynamic_config._sets)
2675 if not required_set_names and \
2676 not self._dynamic_config._ignored_deps and \
2677 not self._dynamic_config._dep_stack:
2679 root_config = self._frozen_config.roots[root]
2680 setconfig = root_config.setconfig
2682 # Reuse existing SetArg instances when available.
2683 for arg in self._dynamic_config.digraph.root_nodes():
2684 if not isinstance(arg, SetArg):
2686 if arg.root_config != root_config:
2688 if arg.name in required_set_names:
2690 required_set_names.remove(arg.name)
2691 # Create new SetArg instances only when necessary.
2692 for s in required_set_names:
2693 if required_sets is None or root not in required_sets:
2694 expanded_set = InternalPackageSet(
2695 initial_atoms=setconfig.getSetAtoms(s))
2697 expanded_set = required_sets[root][s]
2698 atom = SETPREFIX + s
2699 args.append(SetArg(arg=atom, set=expanded_set,
2700 root_config=root_config))
2701 if root == self._frozen_config.target_root:
2702 self._dynamic_config._sets[s] = expanded_set
2703 vardb = root_config.trees["vartree"].dbapi
# Push each set atom onto the dep stack as a Dependency rooted at
# its SetArg, then re-queue any previously ignored deps.
2705 for atom in arg.set:
2706 self._dynamic_config._dep_stack.append(
2707 Dependency(atom=atom, root=root, parent=arg))
2708 if self._dynamic_config._ignored_deps:
2709 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
2710 self._dynamic_config._ignored_deps = []
2711 if not self._create_graph(allow_unsatisfied=True):
2713 # Check the unsatisfied deps to see if any initially satisfied deps
2714 # will become unsatisfied due to an upgrade. Initially unsatisfied
2715 # deps are irrelevant since we only want to avoid breaking deps
2716 # that are initially satisfied.
2717 while self._dynamic_config._unsatisfied_deps:
2718 dep = self._dynamic_config._unsatisfied_deps.pop()
2719 matches = vardb.match_pkgs(dep.atom)
2721 self._dynamic_config._initially_unsatisfied_deps.append(dep)
2723 # A scheduled installation broke a deep dependency.
2724 # Add the installed package to the graph so that it
2725 # will be appropriately reported as a slot collision
2726 # (possibly solvable via backtracking).
2727 pkg = matches[-1] # highest match
2728 if not self._add_pkg(pkg, dep):
2730 if not self._create_graph(allow_unsatisfied=True):
2734 def _pkg(self, cpv, type_name, root_config, installed=False,
2737 Get a package instance from the cache, or create a new
2738 one if necessary. Raises PackageNotFound from aux_get if it
2739 fails for some reason (package does not exist or is
# NOTE(review): sampled view — lines are missing between the numbered
# statements below. Comments added only.
2743 if installed or onlydeps:
2744 operation = "nomerge"
# Cache key is (type_name, root, cpv, operation).
2745 pkg = self._frozen_config._pkg_cache.get(
2746 (type_name, root_config.root, cpv, operation))
2747 if pkg is None and onlydeps and not installed:
2748 # Maybe it already got pulled in as a "merge" node.
2749 pkg = self._dynamic_config.mydbapi[root_config.root].get(
2750 (type_name, root_config.root, cpv, 'merge'))
2753 tree_type = self.pkg_tree_map[type_name]
2754 db = root_config.trees[tree_type].dbapi
2755 db_keys = list(self._frozen_config._trees_orig[root_config.root][
2756 tree_type].dbapi._aux_cache_keys)
# NOTE(review): under Python 3 zip() yields a one-shot iterator;
# presumably Package consumes it exactly once — confirm when porting.
2758 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
2760 raise portage.exception.PackageNotFound(cpv)
2761 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
2762 installed=installed, metadata=metadata, onlydeps=onlydeps,
2763 root_config=root_config, type_name=type_name)
2764 self._frozen_config._pkg_cache[pkg] = pkg
2767 def _validate_blockers(self):
2768 """Remove any blockers from the digraph that do not match any of the
2769 packages within the graph. If necessary, create hard deps to ensure
2770 correct merge order such that mutually blocking packages are never
2771 installed simultaneously."""
2773 if "--buildpkgonly" in self._frozen_config.myopts or \
2774 "--nodeps" in self._frozen_config.myopts:
2777 complete = "complete" in self._dynamic_config.myparams
2778 deep = "deep" in self._dynamic_config.myparams
2781 # Pull in blockers from all installed packages that haven't already
2782 # been pulled into the depgraph. This is not enabled by default
2783 # due to the performance penalty that is incurred by all the
2784 # additional dep_check calls that are required.
2786 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
2787 for myroot in self._frozen_config.trees:
2788 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
2789 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
2790 pkgsettings = self._frozen_config.pkgsettings[myroot]
2791 root_config = self._frozen_config.roots[myroot]
2792 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
2793 final_db = self._dynamic_config.mydbapi[myroot]
2795 blocker_cache = BlockerCache(myroot, vardb)
2796 stale_cache = set(blocker_cache)
2799 stale_cache.discard(cpv)
2800 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
2802 # Check for masked installed packages. Only warn about
2803 # packages that are in the graph in order to avoid warning
2804 # about those that will be automatically uninstalled during
2805 # the merge process or by --depclean. Always warn about
2806 # packages masked by license, since the user likely wants
2807 # to adjust ACCEPT_LICENSE.
2809 if pkg_in_graph and not visible(pkgsettings, pkg):
2810 self._dynamic_config._masked_installed.add(pkg)
2811 elif pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
2812 self._dynamic_config._masked_installed.add(pkg)
2813 elif pkg_in_graph or complete or deep:
2814 # Check for upgrades in the same slot that are
2815 # masked due to a LICENSE change in a newer
2816 # version that is not masked for any other reason.
2817 # Only do this for packages that are already in
2818 # the graph, or complete or deep graphs, since
2819 # otherwise it is likely a waste of time.
2821 for db, pkg_type, built, installed, db_keys in dbs:
2826 for upgrade_pkg in self._iter_match_pkgs(
2827 root_config, pkg_type, pkg.slot_atom):
2828 if upgrade_pkg <= pkg:
2830 if not visible(pkgsettings,
2831 upgrade_pkg, ignore=('LICENSE',)):
2833 if pkgsettings._getMissingLicenses(
2834 upgrade_pkg.cpv, upgrade_pkg.metadata):
2835 self._dynamic_config._masked_license_updates.add(upgrade_pkg)
2839 blocker_atoms = None
2845 self._dynamic_config._blocker_parents.child_nodes(pkg))
2850 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
2853 if blockers is not None:
2854 blockers = set(blocker.atom for blocker in blockers)
2856 # If this node has any blockers, create a "nomerge"
2857 # node for it so that they can be enforced.
2858 self._spinner_update()
2859 blocker_data = blocker_cache.get(cpv)
2860 if blocker_data is not None and \
2861 blocker_data.counter != long(pkg.metadata["COUNTER"]):
2864 # If blocker data from the graph is available, use
2865 # it to validate the cache and update the cache if
2867 if blocker_data is not None and \
2868 blockers is not None:
2869 if not blockers.symmetric_difference(
2870 blocker_data.atoms):
2874 if blocker_data is None and \
2875 blockers is not None:
2876 # Re-use the blockers from the graph.
2877 blocker_atoms = sorted(blockers)
2878 counter = long(pkg.metadata["COUNTER"])
2880 blocker_cache.BlockerData(counter, blocker_atoms)
2881 blocker_cache[pkg.cpv] = blocker_data
2885 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
2887 # Use aux_get() to trigger FakeVartree global
2888 # updates on *DEPEND when appropriate.
2889 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2890 # It is crucial to pass in final_db here in order to
2891 # optimize dep_check calls by eliminating atoms via
2892 # dep_wordreduce and dep_eval calls.
2894 portage.dep._dep_check_strict = False
2896 success, atoms = portage.dep_check(depstr,
2897 final_db, pkgsettings, myuse=pkg.use.enabled,
2898 trees=self._dynamic_config._graph_trees, myroot=myroot)
2899 except Exception as e:
2900 if isinstance(e, SystemExit):
2902 # This is helpful, for example, if a ValueError
2903 # is thrown from cpv_expand due to multiple
2904 # matches (this can happen if an atom lacks a
2906 show_invalid_depstring_notice(
2907 pkg, depstr, str(e))
2911 portage.dep._dep_check_strict = True
2913 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2914 if replacement_pkg and \
2915 replacement_pkg[0].operation == "merge":
2916 # This package is being replaced anyway, so
2917 # ignore invalid dependencies so as not to
2918 # annoy the user too much (otherwise they'd be
2919 # forced to manually unmerge it first).
2921 show_invalid_depstring_notice(pkg, depstr, atoms)
2923 blocker_atoms = [myatom for myatom in atoms \
2925 blocker_atoms.sort()
2926 counter = long(pkg.metadata["COUNTER"])
2927 blocker_cache[cpv] = \
2928 blocker_cache.BlockerData(counter, blocker_atoms)
2931 for atom in blocker_atoms:
2932 blocker = Blocker(atom=atom,
2933 eapi=pkg.metadata["EAPI"], root=myroot)
2934 self._dynamic_config._blocker_parents.add(blocker, pkg)
2935 except portage.exception.InvalidAtom as e:
2936 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2937 show_invalid_depstring_notice(
2938 pkg, depstr, "Invalid Atom: %s" % (e,))
2940 for cpv in stale_cache:
2941 del blocker_cache[cpv]
2942 blocker_cache.flush()
2945 # Discard any "uninstall" tasks scheduled by previous calls
2946 # to this method, since those tasks may not make sense given
2947 # the current graph state.
2948 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
2949 if previous_uninstall_tasks:
2950 self._dynamic_config._blocker_uninstalls = digraph()
2951 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
2953 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
2954 self._spinner_update()
2955 root_config = self._frozen_config.roots[blocker.root]
2956 virtuals = root_config.settings.getvirtuals()
2957 myroot = blocker.root
2958 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
2959 final_db = self._dynamic_config.mydbapi[myroot]
2961 provider_virtual = False
2962 if blocker.cp in virtuals and \
2963 not self._have_new_virt(blocker.root, blocker.cp):
2964 provider_virtual = True
2966 # Use this to check PROVIDE for each matched package
2968 atom_set = InternalPackageSet(
2969 initial_atoms=[blocker.atom])
2971 if provider_virtual:
2973 for provider_entry in virtuals[blocker.cp]:
2975 portage.dep_getkey(provider_entry)
2976 atoms.append(Atom(blocker.atom.replace(
2977 blocker.cp, provider_cp)))
2979 atoms = [blocker.atom]
2981 blocked_initial = set()
2983 for pkg in initial_db.match_pkgs(atom):
2984 if atom_set.findAtomForPackage(pkg):
2985 blocked_initial.add(pkg)
2987 blocked_final = set()
2989 for pkg in final_db.match_pkgs(atom):
2990 if atom_set.findAtomForPackage(pkg):
2991 blocked_final.add(pkg)
2993 if not blocked_initial and not blocked_final:
2994 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
2995 self._dynamic_config._blocker_parents.remove(blocker)
2996 # Discard any parents that don't have any more blockers.
2997 for pkg in parent_pkgs:
2998 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
2999 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3000 self._dynamic_config._blocker_parents.remove(pkg)
3002 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3003 unresolved_blocks = False
3004 depends_on_order = set()
3005 for pkg in blocked_initial:
3006 if pkg.slot_atom == parent.slot_atom and \
3007 not blocker.atom.blocker.overlap.forbid:
3008 # New !!atom blockers do not allow temporary
3009 # simulaneous installation, so unlike !atom
3010 # blockers, !!atom blockers aren't ignored
3011 # when they match other packages occupying
3014 if parent.installed:
3015 # Two currently installed packages conflict with
3016 # eachother. Ignore this case since the damage
3017 # is already done and this would be likely to
3018 # confuse users if displayed like a normal blocker.
3021 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3023 if parent.operation == "merge":
3024 # Maybe the blocked package can be replaced or simply
3025 # unmerged to resolve this block.
3026 depends_on_order.add((pkg, parent))
3028 # None of the above blocker resolutions techniques apply,
3029 # so apparently this one is unresolvable.
3030 unresolved_blocks = True
3031 for pkg in blocked_final:
3032 if pkg.slot_atom == parent.slot_atom and \
3033 not blocker.atom.blocker.overlap.forbid:
3034 # New !!atom blockers do not allow temporary
3035 # simulaneous installation, so unlike !atom
3036 # blockers, !!atom blockers aren't ignored
3037 # when they match other packages occupying
3040 if parent.operation == "nomerge" and \
3041 pkg.operation == "nomerge":
3042 # This blocker will be handled the next time that a
3043 # merge of either package is triggered.
3046 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3048 # Maybe the blocking package can be
3049 # unmerged to resolve this block.
3050 if parent.operation == "merge" and pkg.installed:
3051 depends_on_order.add((pkg, parent))
3053 elif parent.operation == "nomerge":
3054 depends_on_order.add((parent, pkg))
3056 # None of the above blocker resolutions techniques apply,
3057 # so apparently this one is unresolvable.
3058 unresolved_blocks = True
3060 # Make sure we don't unmerge any package that have been pulled
3062 if not unresolved_blocks and depends_on_order:
3063 for inst_pkg, inst_task in depends_on_order:
3064 if self._dynamic_config.digraph.contains(inst_pkg) and \
3065 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3066 unresolved_blocks = True
3069 if not unresolved_blocks and depends_on_order:
3070 for inst_pkg, inst_task in depends_on_order:
3071 uninst_task = Package(built=inst_pkg.built,
3072 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3073 metadata=inst_pkg.metadata,
3074 operation="uninstall",
3075 root_config=inst_pkg.root_config,
3076 type_name=inst_pkg.type_name)
3077 # Enforce correct merge order with a hard dep.
3078 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3079 priority=BlockerDepPriority.instance)
3080 # Count references to this blocker so that it can be
3081 # invalidated after nodes referencing it have been
3083 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3084 if not unresolved_blocks and not depends_on_order:
3085 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3086 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3087 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3088 self._dynamic_config._blocker_parents.remove(blocker)
3089 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3090 self._dynamic_config._blocker_parents.remove(parent)
3091 if unresolved_blocks:
3092 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
	def _accept_blocker_conflicts(self):
		# Decide whether unresolved blocker conflicts can be tolerated
		# for this invocation: each of the options tested below means no
		# packages are actually merged/unmerged on the live system, so a
		# blocker conflict cannot do harm.
		# NOTE(review): the accumulator setup and return statements are
		# not visible in this chunk of the listing.
		for x in ("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri", "--nodeps"):
			if x in self._frozen_config.myopts:
	def _merge_order_bias(self, mygraph):
		"""
		For optimal leaf node selection, promote deep system runtime deps and
		order nodes from highest to lowest overall reference count.

		@param mygraph: dependency graph whose .order list is sorted in place
		"""

		# node_info maps each node to its parent (reference) count.
		# NOTE(review): its initialization is not visible in this chunk.
		for node in mygraph.order:
			node_info[node] = len(mygraph.parent_nodes(node))
		deep_system_deps = _find_deep_system_runtime_deps(mygraph)

		def cmp_merge_preference(node1, node2):
			# cmp-style comparator; a negative result sorts node1 first.

			# Special-case uninstall operations relative to other nodes.
			# NOTE(review): the return statements of these branches are
			# not visible in this chunk.
			if node1.operation == 'uninstall':
				if node2.operation == 'uninstall':

			if node2.operation == 'uninstall':
				if node1.operation == 'uninstall':

			# Deep system runtime deps get distinct treatment from
			# ordinary nodes.
			node1_sys = node1 in deep_system_deps
			node2_sys = node2 in deep_system_deps
			if node1_sys != node2_sys:

			# Otherwise, a higher reference count sorts earlier.
			return node_info[node2] - node_info[node1]

		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
	def altlist(self, reversed=False):
		"""
		Return a copy of the serialized task list, resolving conflicts
		and serializing the graph on demand (the result is cached in
		_dynamic_config._serialized_tasks_cache).

		NOTE(review): the parameter name "reversed" shadows the builtin
		of the same name inside this method.
		"""

		while self._dynamic_config._serialized_tasks_cache is None:
			self._resolve_conflicts()
				# Serialization may raise _serialize_tasks_retry, in which
				# case the while loop tries again.
				self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
					self._serialize_tasks()
			except self._serialize_tasks_retry:

		# Return a copy so callers cannot mutate the cache.
		retlist = self._dynamic_config._serialized_tasks_cache[:]
	def schedulerGraph(self):
		"""
		The scheduler graph is identical to the normal one except that
		uninstall edges are reversed in specific cases that require
		conflicting packages to be temporarily installed simultaneously.
		This is intended for use by the Scheduler in it's parallelization
		logic. It ensures that temporary simultaneous installation of
		conflicting packages is avoided when appropriate (especially for
		!!atom blockers), but allowed in specific cases that require it.

		Note that this method calls break_refs() which alters the state of
		internal Package instances such that this depgraph instance should
		not be used to perform any more calculations.
		"""
		# Build the graph lazily; it is cached on _dynamic_config.
		if self._dynamic_config._scheduler_graph is None:
		self.break_refs(self._dynamic_config._scheduler_graph.order)
		return self._dynamic_config._scheduler_graph
	def break_refs(self, nodes):
		"""
		Take a mergelist like that returned from self.altlist() and
		break any references that lead back to the depgraph. This is
		useful if you want to hold references to packages without
		also holding the depgraph on the heap.
		"""
			if hasattr(node, "root_config"):
				# The FakeVartree references the _package_cache which
				# references the depgraph. So that Package instances don't
				# hold the depgraph and FakeVartree on the heap, replace
				# the RootConfig that references the FakeVartree with the
				# original RootConfig instance which references the actual
				# vartree instance.
				node.root_config = \
					self._frozen_config._trees_orig[node.root_config.root]["root_config"]
3191 def _resolve_conflicts(self):
3192 if not self._complete_graph():
3193 raise self._unknown_internal_error()
3195 if not self._validate_blockers():
3196 raise self._unknown_internal_error()
3198 if self._dynamic_config._slot_collision_info:
3199 self._process_slot_conflicts()
	def _serialize_tasks(self):
		"""
		Compute a merge-ordered task list from the dependency graph.

		@rtype: tuple
		@return: (retlist, scheduler_graph) where retlist is the ordered
			list of merge/uninstall tasks plus Blocker instances, and
			scheduler_graph is the graph handed to the Scheduler.
		Raises _serialize_tasks_retry when the graph must first be
		completed, or _unknown_internal_error for unresolvable blocker or
		circular-dependency situations.
		NOTE(review): numerous lines of this method are elided from this
		listing chunk (branch headers, returns, some assignments).
		"""

		if "--debug" in self._frozen_config.myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self._dynamic_config.digraph.debug_print()
			writemsg("\n", noiselevel=-1)

		scheduler_graph = self._dynamic_config.digraph.copy()

		if '--nodeps' in self._frozen_config.myopts:
			# Preserve the package order given on the command line.
			return ([node for node in scheduler_graph \
				if isinstance(node, Package) \
				and node.operation == 'merge'], scheduler_graph)

		mygraph=self._dynamic_config.digraph.copy()
		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
		removed_nodes = set()
			for node in mygraph.root_nodes():
				if not isinstance(node, Package) or \
					node.installed or node.onlydeps:
					removed_nodes.add(node)
			self._spinner_update()
			mygraph.difference_update(removed_nodes)
			if not removed_nodes:
			removed_nodes.clear()
		self._merge_order_bias(mygraph)
		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:
		myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()

		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		# resolved.
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		complete = "complete" in self._dynamic_config.myparams

		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]

		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._frozen_config._running_root.root
		from portage.const import PORTAGE_PACKAGE_ATOM
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)

			running_portage = running_portage[0]
			running_portage = None

		if replacement_portage:
			replacement_portage = replacement_portage[0]
			replacement_portage = None

		if replacement_portage == running_portage:
			replacement_portage = None

		if replacement_portage is not None:
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)

		if running_portage is not None:
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=running_portage.use.enabled,
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString as e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				portage_rdepend = {running_portage : []}
			# Collect portage's non-blocker runtime deps so they are never
			# uninstalled out from under the running instance.
			for atoms in portage_rdepend.values():
				runtime_deps.update(atom for atom in atoms \
					if not atom.blocker)

		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
			if node not in mergeable_nodes:
			if node == replacement_portage and \
				mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):

		def ignore_uninst_or_med(priority):
			# Like priority_range.ignore_medium, but blocker-uninstall
			# edges are also treated specially.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium(priority)

		def ignore_uninst_or_med_soft(priority):
			# Same idea with the medium_soft threshold.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium_soft(priority)

		tree_mode = "--tree" in self._frozen_config.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available. This is set to False when the previous iteration
		# failed to select any nodes. It is reset whenever nodes are
		# successfully selected.

		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False

		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		#
		# iteration   prefer_asap   drop_satisfied
		#
		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.

		while not mygraph.empty():
			self._spinner_update()
			selected_nodes = None
			ignore_priority = None
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=priority_range.ignore_soft):
						selected_nodes = [node]
						asap_nodes.remove(node)

			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				# Loosen the ignore threshold one step at a time until a
				# leaf node becomes available.
				for i in range(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)
						# If there is a mixture of merges and uninstalls,
						# do the uninstalls first.
							good_uninstalls = []
								if node.operation == "uninstall":
									good_uninstalls.append(node)

								nodes = good_uninstalls

						if ignore_priority is None and not tree_mode:
							# Greedily pop all of these nodes since no
							# relationship has been ignored. This optimization
							# destroys --tree output, so it's disabled in tree
							# mode.
							selected_nodes = nodes
							# For optimal merge order:
							# * Only pop one node.
							# * Removing a root node (node without a parent)
							#   will not produce a leaf node, so avoid it.
							# * It's normal for a selected uninstall to be a
							#   root node, so don't check them for parents.
								if node.operation == "uninstall" or \
									mygraph.parent_nodes(node):
									selected_nodes = [node]

			if not selected_nodes:
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
					# Gather a group of mutually-runtime-dependent nodes
					# that can be merged together.
					for i in range(priority_range.SOFT,
						priority_range.MEDIUM_SOFT + 1):
						ignore_priority = priority_range.ignore_priority[i]
							if not mygraph.parent_nodes(node):
							selected_nodes = set()
							if gather_deps(ignore_priority,
								mergeable_nodes, selected_nodes, node):
								selected_nodes = None

				if prefer_asap and asap_nodes and not selected_nodes:
					# We failed to find any asap nodes to merge, so ignore
					# them for the next iteration.

			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
							DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
						if child in asap_nodes:
						asap_nodes.append(child)

			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.

					priority_range = DepPrioritySatisfiedRange
					priority_range = DepPriorityNormalRange

				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)

				min_parent_deps = None
				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).

					if task in ignored_uninstall_tasks:

					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.

					root_config = self._frozen_config.roots[task.root]
					inst_pkg = self._pkg(task.cpv, "installed", root_config,

					if self._dynamic_config.digraph.contains(inst_pkg):

					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						# EAPI 0/1 blockers have no !!atom syntax, so overlap
						# intent is inferred heuristically for them.
						if blocker.eapi in ("0", "1"):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
					if forbid_overlap and running_root == task.root:

					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:

							if other_version is None:

						# For packages in the system set, don't take
						# any chances. If the conflict can't be resolved
						# by a normal replacement operation then abort.
							for atom in root_config.sets[
								"system"].iterAtomsForPackage(task):
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.
						# For packages in the world set, go ahead an uninstall
						# when necessary, as long as the atom will be satisfied
						# in the final state.
						graph_db = self._dynamic_config.mydbapi[task.root]
							for atom in root_config.sets[
								"selected"].iterAtomsForPackage(task):
								for pkg in graph_db.match_pkgs(atom):
									# Record the atom so the blocked world
									# package can be reported later.
									self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom

						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					# common cases.
					mergeable_parent = False
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
							mergeable_nodes, set(), parent):
							mergeable_parent = True

					if not mergeable_parent:

					# Prefer the task whose parents have the fewest
					# outstanding deps.
					parent_deps.remove(task)
					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)

				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)

					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)

					# Sometimes a merge node will render an uninstall
					# node unnecessary (due to occupying the same SLOT),
					# and we want to avoid executing a separate uninstall
					# task in that case.
					slot_node = self._dynamic_config.mydbapi[uninst_task.root
						].match_pkgs(uninst_task.slot_atom)
						slot_node[0].operation == "merge":
						mygraph.add(slot_node[0], uninst_task,
							priority=BlockerDepPriority.instance)

					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()

			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				for node in myblocker_uninstalls.leaf_nodes():
						mygraph.remove(node)
						ignored_uninstall_tasks.add(node)

				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				self._dynamic_config._circular_deps_for_display = mygraph
				raise self._unknown_internal_error()

			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			drop_satisfied = False

			mygraph.difference_update(selected_nodes)

			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":

				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True

					vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
					previous_cpv = vardb.match(node.slot_atom)
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						previous_cpv = previous_cpv[0]
							("installed", node.root, previous_cpv, "uninstall")
							mygraph.remove(uninst_task)

				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
							solved_blockers.add(blocker)

				retlist.append(node)

				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(Blocker(atom=blocker.atom,
							root=blocker.root, eapi=blocker.eapi,

		unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)

		for blocker in unsolvable_blockers:
			retlist.append(blocker)

		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not unsolvable_blockers:
			self._dynamic_config.myparams["complete"] = True
			raise self._serialize_tasks_retry("")

		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		if self._dynamic_config._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		return retlist, scheduler_graph
	def _show_circular_deps(self, mygraph):
		# No leaf nodes are available, so we have a circular
		# dependency panic situation. Reduce the noise level to a
		# minimum via repeated elimination of root nodes since they
		# have no parents and thus can not be part of a cycle.
			root_nodes = mygraph.root_nodes(
				ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
			mygraph.difference_update(root_nodes)
		# Display the USE flags that are enabled on nodes that are part
		# of dependency cycles in case that helps the user decide to
		# disable some of them.
		tempgraph = mygraph.copy()
		while not tempgraph.empty():
			nodes = tempgraph.leaf_nodes()
				# No leaf remains, so pick an arbitrary node from the
				# remaining cycle.
				node = tempgraph.order[0]
			display_order.append(node)
			tempgraph.remove(node)
		display_order.reverse()
		# Force a verbose tree display so the cycle is readable.
		self._frozen_config.myopts.pop("--quiet", None)
		self._frozen_config.myopts.pop("--verbose", None)
		self._frozen_config.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(display_order)
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
		portage.writemsg("\n", noiselevel=-1)
		mygraph.debug_print()
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		portage.writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
3854 def _show_merge_list(self):
3855 if self._dynamic_config._serialized_tasks_cache is not None and \
3856 not (self._dynamic_config._displayed_list and \
3857 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
3858 self._dynamic_config._displayed_list == \
3859 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
3860 display_list = self._dynamic_config._serialized_tasks_cache[:]
3861 if "--tree" in self._frozen_config.myopts:
3862 display_list.reverse()
3863 self.display(display_list)
	def _show_unsatisfied_blockers(self, blockers):
		# Report blockers that could not be resolved, along with the
		# parents that pulled the conflicting packages into the graph.
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		from textwrap import wrap
		portage.writemsg("\n", noiselevel=-1)
		for line in wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Display the conflicting packages along with the packages
		# that pulled them in. This is helpful for troubleshooting
		# cases in which blockers don't solve automatically and
		# the reasons are not apparent from the normal merge list
		# display.

		for blocker in blockers:
			for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
				self._dynamic_config._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to the recorded world atom, if any.
					atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
					if atom is not None:
						parent_atoms = set([("world", atom)])
					conflict_pkgs[pkg] = parent_atoms

			# Reduce noise by pruning packages that are only
			# pulled in by other conflict packages.
			for pkg, parent_atoms in conflict_pkgs.items():
				relevant_parent = False
				for parent, atom in parent_atoms:
					if parent not in conflict_pkgs:
						relevant_parent = True
				if not relevant_parent:
					pruned_pkgs.add(pkg)
			for pkg in pruned_pkgs:
				del conflict_pkgs[pkg]

			# Max number of parents shown, to avoid flooding the display.
			for pkg, parent_atoms in conflict_pkgs.items():

				# Prefer packages that are not directly involved in a conflict.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if parent not in conflict_pkgs:
						pruned_list.add(parent_atom)

				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					pruned_list.add(parent_atom)

				omitted_parents = len(parent_atoms) - len(pruned_list)
				msg.append(indent + "%s pulled in by\n" % pkg)

				for parent_atom in pruned_list:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
						# Display the specific atom from SetArg or
						# Package types.
						msg.append("%s required by %s" % (atom, parent))

					msg.append(2*indent)
					msg.append("(and %d more)\n" % omitted_parents)

			sys.stderr.write("".join(msg))

		if "--quiet" not in self._frozen_config.myopts:
			show_blocker_docs_link()
# NOTE(review): elided numbered listing — each line starts with its original
# source line number; numbering gaps are lines missing from this excerpt.
# Code lines are kept byte-identical; only comments were added/fixed.
#
# display(): render the "emerge --pretend"-style merge list — one row per
# package or blocker with status letters, USE-flag changes, download sizes
# and repository info — and optionally update the world file on the way.
# NOTE(review): mutable default argument `favorites=[]` — it is only read
# here (fed to InternalPackageSet), so it appears harmless, but a None
# default would be the safer idiom; cannot change without the elided body.
3961 def display(self, mylist, favorites=[], verbosity=None):
3963 # This is used to prevent display_problems() from
3964 # redundantly displaying this exact same merge list
3965 # again via _show_merge_list().
3966 self._dynamic_config._displayed_list = mylist
# Default verbosity: 1 with --quiet, 3 with --verbose, otherwise 2.
3968 if verbosity is None:
3969 verbosity = ("--quiet" in self._frozen_config.myopts and 1 or \
3970 "--verbose" in self._frozen_config.myopts and 3 or 2)
3971 favorites_set = InternalPackageSet(favorites)
3972 oneshot = "--oneshot" in self._frozen_config.myopts or \
3973 "--onlydeps" in self._frozen_config.myopts
3974 columns = "--columns" in self._frozen_config.myopts
3975 tree_display = "--tree" in self._frozen_config.myopts
3980 counters = PackageCounters()
# In quiet mode a stub create_use_string is installed; the full version
# below formats one NAME="flag ..." group for the verbose USE display
# (stub body elided from this excerpt — presumably returns "").
3982 if verbosity == 1 and "--verbose" not in self._frozen_config.myopts:
3983 def create_use_string(*args):
3986 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3988 is_new, reinst_flags,
3989 all_flags=(verbosity == 3 or "--quiet" in self._frozen_config.myopts),
3990 alphabetical=("--alphabetical" in self._frozen_config.myopts)):
3998 cur_iuse = set(cur_iuse)
3999 enabled_flags = cur_iuse.intersection(cur_use)
4000 removed_iuse = set(old_iuse).difference(cur_iuse)
4001 any_iuse = cur_iuse.union(old_iuse)
4002 any_iuse = list(any_iuse)
# Flag decoration: '*' = state changed vs. the installed version,
# '%' = flag entered/left IUSE, parentheses = flag is forced/masked.
4004 for flag in any_iuse:
4007 reinst_flag = reinst_flags and flag in reinst_flags
4008 if flag in enabled_flags:
4010 if is_new or flag in old_use and \
4011 (all_flags or reinst_flag):
4012 flag_str = red(flag)
4013 elif flag not in old_iuse:
4014 flag_str = yellow(flag) + "%*"
4015 elif flag not in old_use:
4016 flag_str = green(flag) + "*"
4017 elif flag in removed_iuse:
4018 if all_flags or reinst_flag:
4019 flag_str = yellow("-" + flag) + "%"
4022 flag_str = "(" + flag_str + ")"
4023 removed.append(flag_str)
4026 if is_new or flag in old_iuse and \
4027 flag not in old_use and \
4028 (all_flags or reinst_flag):
4029 flag_str = blue("-" + flag)
4030 elif flag not in old_iuse:
4031 flag_str = yellow("-" + flag)
4032 if flag not in iuse_forced:
4034 elif flag in old_use:
4035 flag_str = green("-" + flag) + "*"
4037 if flag in iuse_forced:
4038 flag_str = "(" + flag_str + ")"
4040 enabled.append(flag_str)
4042 disabled.append(flag_str)
4045 ret = " ".join(enabled)
4047 ret = " ".join(enabled + disabled + removed)
4049 ret = '%s="%s" ' % (name, ret)
# Build the ordered display list (tree or flat); unsatisfied blockers
# are appended at the end so they always show up.
4052 repo_display = RepoDisplay(self._frozen_config.roots)
4053 unsatisfied_blockers = []
4056 if isinstance(x, Blocker) and not x.satisfied:
4057 unsatisfied_blockers.append(x)
4059 ordered_nodes.append(x)
4062 display_list = self._tree_display(ordered_nodes)
4064 display_list = [(x, 0, True) for x in ordered_nodes]
4066 mylist = display_list
4067 for x in unsatisfied_blockers:
4068 mylist.append((x, 0, True))
4070 # files to fetch list - avoids counting a same file twice
4071 # in size display (verbose mode)
4074 # Use this set to detect when all the "repoadd" strings are "[0]"
4075 # and disable the entire repo display in this case.
# Main loop: format one output row per (node, depth, ordered) entry.
4078 for mylist_index in range(len(mylist)):
4079 x, depth, ordered = mylist[mylist_index]
4083 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4084 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
4085 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4086 vartree = self._frozen_config.trees[myroot]["vartree"]
4087 pkgsettings = self._frozen_config.pkgsettings[myroot]
4090 indent = " " * depth
# Blocker rows: lowercase 'b' for satisfied blockers, 'B' for
# unsatisfied ones.
4092 if isinstance(x, Blocker):
4094 blocker_style = "PKG_BLOCKER_SATISFIED"
4095 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4097 blocker_style = "PKG_BLOCKER"
4098 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4100 counters.blocks += 1
4102 counters.blocks_satisfied += 1
4103 resolved = portage.dep_expand(
4104 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4105 if "--columns" in self._frozen_config.myopts and "--quiet" in self._frozen_config.myopts:
4106 addl += " " + colorize(blocker_style, str(resolved))
4108 addl = "[%s %s] %s%s" % \
4109 (colorize(blocker_style, "blocks"),
4110 addl, indent, colorize(blocker_style, str(resolved)))
4111 block_parents = self._dynamic_config._blocker_parents.parent_nodes(x)
4112 block_parents = set([pnode[2] for pnode in block_parents])
4113 block_parents = ", ".join(block_parents)
4115 addl += colorize(blocker_style,
4116 " (\"%s\" is blocking %s)") % \
4117 (str(x.atom).lstrip("!"), block_parents)
4119 addl += colorize(blocker_style,
4120 " (is blocking %s)") % block_parents
4121 if isinstance(x, Blocker) and x.satisfied:
4126 blockers.append(addl)
# Package rows. Status letters seen below: R = reinstall, U = upgrade,
# UD = downgrade, NS = new slot, N = new install.
4129 pkg_merge = ordered and pkg_status == "merge"
4130 if not pkg_merge and pkg_status == "merge":
4131 pkg_status = "nomerge"
4132 built = pkg_type != "ebuild"
4133 installed = pkg_type == "installed"
4135 metadata = pkg.metadata
4137 repo_name = metadata["repository"]
4138 if pkg.type_name == "ebuild":
4139 ebuild_path = portdb.findname(pkg.cpv)
4140 if ebuild_path is None:
4141 raise AssertionError(
4142 "ebuild not found for '%s'" % pkg.cpv)
# Repository root is three directory levels above the ebuild file
# (repo/category/package/pkg.ebuild).
4143 repo_path_real = os.path.dirname(os.path.dirname(
4144 os.path.dirname(ebuild_path)))
4146 repo_path_real = portdb.getRepositoryPath(repo_name)
4147 pkg_use = list(pkg.use.enabled)
4148 if not pkg.built and pkg.operation == 'merge' and \
4149 'fetch' in pkg.metadata.restrict:
4152 counters.restrict_fetch += 1
4153 if portdb.fetch_check(pkg_key, pkg_use):
4156 counters.restrict_fetch_satisfied += 1
4158 # we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
4159 # param is used for -u, where you still *do* want to see when something is being upgraded.
4162 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4163 if vardb.cpv_exists(pkg_key):
# Exact version already installed: reinstall ("R").
4164 addl=" "+yellow("R")+fetch+" "
4167 counters.reinst += 1
4168 if pkg_type == "binary":
4169 counters.binary += 1
4170 elif pkg_status == "uninstall":
4171 counters.uninst += 1
4172 # filter out old-style virtual matches
4173 elif installed_versions and \
4174 portage.cpv_getkey(installed_versions[0]) == \
4175 portage.cpv_getkey(pkg_key):
4176 myinslotlist = vardb.match(pkg.slot_atom)
4177 # If this is the first install of a new-style virtual, we
4178 # need to filter out old-style virtual matches.
4179 if myinslotlist and \
4180 portage.cpv_getkey(myinslotlist[0]) != \
4181 portage.cpv_getkey(pkg_key):
4184 myoldbest = myinslotlist[:]
# Downgrade when the version being merged is not the best of
# {new, installed-in-slot}; otherwise an upgrade.
4186 if not portage.dep.cpvequal(pkg_key,
4187 portage.best([pkg_key] + myoldbest)):
4189 addl += turquoise("U")+blue("D")
4191 counters.downgrades += 1
4192 if pkg_type == "binary":
4193 counters.binary += 1
4196 addl += turquoise("U") + " "
4198 counters.upgrades += 1
4199 if pkg_type == "binary":
4200 counters.binary += 1
4202 # New slot, mark it new.
4203 addl = " " + green("NS") + fetch + " "
4204 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4206 counters.newslot += 1
4207 if pkg_type == "binary":
4208 counters.binary += 1
4210 if "--changelog" in self._frozen_config.myopts:
4211 inst_matches = vardb.match(pkg.slot_atom)
4213 ebuild_path_cl = ebuild_path
4214 if ebuild_path_cl is None:
4216 ebuild_path_cl = portdb.findname(pkg.cpv)
4218 if ebuild_path_cl is not None:
4219 changelogs.extend(calc_changelog(
4220 ebuild_path_cl, inst_matches[0], pkg.cpv))
4222 addl = " " + green("N") + " " + fetch + " "
4225 if pkg_type == "binary":
4226 counters.binary += 1
# Compute USE-flag changes relative to the previously installed
# version in the same slot (if any), for the verbose USE display.
4233 forced_flags = set()
4234 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4235 forced_flags.update(pkgsettings.useforce)
4236 forced_flags.update(pkgsettings.usemask)
4238 cur_use = [flag for flag in pkg.use.enabled \
4239 if flag in pkg.iuse.all]
4240 cur_iuse = sorted(pkg.iuse.all)
4242 if myoldbest and myinslotlist:
4243 previous_cpv = myoldbest[0]
4245 previous_cpv = pkg.cpv
4246 if vardb.cpv_exists(previous_cpv):
4247 old_iuse, old_use = vardb.aux_get(
4248 previous_cpv, ["IUSE", "USE"])
4249 old_iuse = list(set(
4250 filter_iuse_defaults(old_iuse.split())))
4252 old_use = old_use.split()
4259 old_use = [flag for flag in old_use if flag in old_iuse]
4261 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4263 use_expand.reverse()
4264 use_expand_hidden = \
4265 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Split a flat flag list into USE_EXPAND groups (e.g. video_cards_x
# -> VIDEO_CARDS group); parts of the body are elided here.
4267 def map_to_use_expand(myvals, forcedFlags=False,
4271 for exp in use_expand:
4274 for val in myvals[:]:
4275 if val.startswith(exp.lower()+"_"):
4276 if val in forced_flags:
4277 forced[exp].add(val[len(exp)+1:])
4278 ret[exp].append(val[len(exp)+1:])
4281 forced["USE"] = [val for val in myvals \
4282 if val in forced_flags]
4284 for exp in use_expand_hidden:
4290 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4291 # are the only thing that triggered reinstallation.
4292 reinst_flags_map = {}
4293 reinstall_for_flags = self._dynamic_config._reinstall_nodes.get(pkg)
4294 reinst_expand_map = None
4295 if reinstall_for_flags:
4296 reinst_flags_map = map_to_use_expand(
4297 list(reinstall_for_flags), removeHidden=False)
4298 for k in list(reinst_flags_map):
4299 if not reinst_flags_map[k]:
4300 del reinst_flags_map[k]
4301 if not reinst_flags_map.get("USE"):
4302 reinst_expand_map = reinst_flags_map.copy()
4303 reinst_expand_map.pop("USE", None)
4304 if reinst_expand_map and \
4305 not set(reinst_expand_map).difference(
4307 use_expand_hidden = \
4308 set(use_expand_hidden).difference(
4311 cur_iuse_map, iuse_forced = \
4312 map_to_use_expand(cur_iuse, forcedFlags=True)
4313 cur_use_map = map_to_use_expand(cur_use)
4314 old_iuse_map = map_to_use_expand(old_iuse)
4315 old_use_map = map_to_use_expand(old_use)
4318 use_expand.insert(0, "USE")
4320 for key in use_expand:
4321 if key in use_expand_hidden:
4323 verboseadd += create_use_string(key.upper(),
4324 cur_iuse_map[key], iuse_forced[key],
4325 cur_use_map[key], old_iuse_map[key],
4326 old_use_map[key], is_new,
4327 reinst_flags_map.get(key))
# Download-size accounting for ebuilds that will be merged; each
# distfile is counted only once via myfetchlist.
4332 if pkg_type == "ebuild" and pkg_merge:
4334 myfilesdict = portdb.getfetchsizes(pkg_key,
4335 useflags=pkg_use, debug=self._frozen_config.edebug)
4336 except portage.exception.InvalidDependString as e:
4337 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4338 show_invalid_depstring_notice(x, src_uri, str(e))
4341 if myfilesdict is None:
4342 myfilesdict="[empty/missing/bad digest]"
4344 for myfetchfile in myfilesdict:
4345 if myfetchfile not in myfetchlist:
4346 mysize+=myfilesdict[myfetchfile]
4347 myfetchlist.append(myfetchfile)
4349 counters.totalsize += mysize
4350 verboseadd += format_size(mysize)
4353 # assign index for a previous version in the same slot
4354 has_previous = False
4355 repo_name_prev = None
4356 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4358 slot_matches = vardb.match(slot_atom)
4361 repo_name_prev = vardb.aux_get(slot_matches[0],
4364 # now use the data to generate output
# Show "prev_repo=>new_repo" when the package moves repositories.
4365 if pkg.installed or not has_previous:
4366 repoadd = repo_display.repoStr(repo_path_real)
4368 repo_path_prev = None
4370 repo_path_prev = portdb.getRepositoryPath(
4372 if repo_path_prev == repo_path_real:
4373 repoadd = repo_display.repoStr(repo_path_real)
4375 repoadd = "%s=>%s" % (
4376 repo_display.repoStr(repo_path_prev),
4377 repo_display.repoStr(repo_path_real))
4379 repoadd_set.add(repoadd)
4381 xs = [portage.cpv_getkey(pkg_key)] + \
4382 list(portage.catpkgsplit(pkg_key)[2:])
4389 if "COLUMNWIDTH" in self._frozen_config.settings:
4391 mywidth = int(self._frozen_config.settings["COLUMNWIDTH"])
4392 except ValueError as e:
4393 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4395 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4396 self._frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
4398 oldlp = mywidth - 30
4401 # Convert myoldbest from a list to a string.
4405 for pos, key in enumerate(myoldbest):
# Show only version-revision, dropping a trailing "-r0".
4406 key = portage.catpkgsplit(key)[2] + \
4407 "-" + portage.catpkgsplit(key)[3]
4408 if key[-3:] == "-r0":
4410 myoldbest[pos] = key
4411 myoldbest = blue("["+", ".join(myoldbest)+"]")
# Possibly record the package in the world file now, unless --oneshot
# was given or it is already a world member.
4414 root_config = self._frozen_config.roots[myroot]
4415 system_set = root_config.sets["system"]
4416 world_set = root_config.sets["selected"]
4421 pkg_system = system_set.findAtomForPackage(pkg)
4422 pkg_world = world_set.findAtomForPackage(pkg)
4423 if not (oneshot or pkg_world) and \
4424 myroot == self._frozen_config.target_root and \
4425 favorites_set.findAtomForPackage(pkg):
4426 # Maybe it will be added to world now.
4427 if create_world_atom(pkg, favorites_set, root_config):
4429 except portage.exception.InvalidDependString:
4430 # This is reported elsewhere if relevant.
# Colorize the package name according to merge/uninstall/nomerge
# status and system/world membership.
4433 def pkgprint(pkg_str):
4436 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4438 return colorize("PKG_MERGE_WORLD", pkg_str)
4440 return colorize("PKG_MERGE", pkg_str)
4441 elif pkg_status == "uninstall":
4442 return colorize("PKG_UNINSTALL", pkg_str)
4445 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4447 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4449 return colorize("PKG_NOMERGE", pkg_str)
4451 if 'interactive' in pkg.metadata.properties and \
4452 pkg.operation == 'merge':
# Interactive packages get an "I" marker in place of the first
# status character.
4453 addl = colorize("WARN", "I") + addl[1:]
4455 counters.interactive += 1
# Row layout. This first branch appends a "to <root>" suffix —
# presumably the cross-root case (guard line elided; TODO confirm);
# the second branch below handles the usual case without it.
4460 if "--columns" in self._frozen_config.myopts:
4461 if "--quiet" in self._frozen_config.myopts:
4462 myprint=addl+" "+indent+pkgprint(pkg_cp)
4463 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4464 myprint=myprint+myoldbest
4465 myprint=myprint+darkgreen("to "+x[1])
4469 myprint = "[%s] %s%s" % \
4470 (pkgprint(pkg_status.ljust(13)),
4471 indent, pkgprint(pkg.cp))
4473 myprint = "[%s %s] %s%s" % \
4474 (pkgprint(pkg.type_name), addl,
4475 indent, pkgprint(pkg.cp))
4476 if (newlp-nc_len(myprint)) > 0:
4477 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4478 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4479 if (oldlp-nc_len(myprint)) > 0:
4480 myprint=myprint+" "*(oldlp-nc_len(myprint))
4481 myprint=myprint+myoldbest
4482 myprint += darkgreen("to " + pkg.root)
4485 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4487 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4488 myprint += indent + pkgprint(pkg_key) + " " + \
4489 myoldbest + darkgreen("to " + myroot)
4491 if "--columns" in self._frozen_config.myopts:
4492 if "--quiet" in self._frozen_config.myopts:
4493 myprint=addl+" "+indent+pkgprint(pkg_cp)
4494 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4495 myprint=myprint+myoldbest
4499 myprint = "[%s] %s%s" % \
4500 (pkgprint(pkg_status.ljust(13)),
4501 indent, pkgprint(pkg.cp))
4503 myprint = "[%s %s] %s%s" % \
4504 (pkgprint(pkg.type_name), addl,
4505 indent, pkgprint(pkg.cp))
4506 if (newlp-nc_len(myprint)) > 0:
4507 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4508 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4509 if (oldlp-nc_len(myprint)) > 0:
4510 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4511 myprint += myoldbest
4514 myprint = "[%s] %s%s %s" % \
4515 (pkgprint(pkg_status.ljust(13)),
4516 indent, pkgprint(pkg.cpv),
4519 myprint = "[%s %s] %s%s %s" % \
4520 (pkgprint(pkg_type), addl, indent,
4521 pkgprint(pkg.cpv), myoldbest)
4523 if columns and pkg.operation == "uninstall":
4525 p.append((myprint, verboseadd, repoadd))
# Warn that emerge will restart itself when a new version of portage
# itself is scheduled to be merged before the end of the list.
4527 if "--tree" not in self._frozen_config.myopts and \
4528 "--quiet" not in self._frozen_config.myopts and \
4529 not self._frozen_config._opts_no_restart.intersection(self._frozen_config.myopts) and \
4530 pkg.root == self._frozen_config._running_root.root and \
4531 portage.match_from_list(
4532 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4533 not vardb.cpv_exists(pkg.cpv) and \
4534 "--quiet" not in self._frozen_config.myopts:
4535 if mylist_index < len(mylist) - 1:
4536 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4537 p.append(colorize("WARN", " then resume the merge."))
# Emit accumulated rows. Plain-string entries (py2 basestring) are
# written through unchanged; tuples get verbose/repo suffixes.
4540 show_repos = repoadd_set and repoadd_set != set(["0"])
4543 if isinstance(x, basestring):
4544 out.write("%s\n" % (x,))
4547 myprint, verboseadd, repoadd = x
4550 myprint += " " + verboseadd
4552 if show_repos and repoadd:
4553 myprint += " " + teal("[%s]" % repoadd)
4555 out.write("%s\n" % (myprint,))
4564 # In python-2.x, str() can trigger a UnicodeEncodeError here,
4565 # so call __str__() directly.
4566 writemsg_stdout(repo_display.__str__(), noiselevel=-1)
4568 if "--changelog" in self._frozen_config.myopts:
4569 writemsg_stdout('\n', noiselevel=-1)
4570 for revision,text in changelogs:
4571 writemsg_stdout(bold('*'+revision) + '\n' + text,
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
# Build the (node, depth, ordered) tuples used by --tree output: work on a
# copy of the dependency graph, splicing Uninstall nodes and their blockers
# in so blocker relationships become visible edges.
4577 def _tree_display(self, mylist):
4579 # If there are any Uninstall instances, add the
4580 # corresponding blockers to the digraph.
4581 mygraph = self._dynamic_config.digraph.copy()
4583 executed_uninstalls = set(node for node in mylist \
4584 if isinstance(node, Package) and node.operation == "unmerge")
4586 for uninstall in self._dynamic_config._blocker_uninstalls.leaf_nodes():
4587 uninstall_parents = \
4588 self._dynamic_config._blocker_uninstalls.parent_nodes(uninstall)
4589 if not uninstall_parents:
4592 # Remove the corresponding "nomerge" node and substitute
4593 # the Uninstall node.
4594 inst_pkg = self._pkg(uninstall.cpv, "installed",
4595 uninstall.root_config, installed=True)
4598 mygraph.remove(inst_pkg)
4603 inst_pkg_blockers = self._dynamic_config._blocker_parents.child_nodes(inst_pkg)
4605 inst_pkg_blockers = []
4607 # Break the Package -> Uninstall edges.
4608 mygraph.remove(uninstall)
4610 # Resolution of a package's blockers
4611 # depends on its own uninstallation.
4612 for blocker in inst_pkg_blockers:
4613 mygraph.add(uninstall, blocker)
4615 # Expand Package -> Uninstall edges into
4616 # Package -> Blocker -> Uninstall edges.
4617 for blocker in uninstall_parents:
4618 mygraph.add(uninstall, blocker)
4619 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4620 if parent != inst_pkg:
4621 mygraph.add(blocker, parent)
4623 # If the uninstall task did not need to be executed because
4624 # of an upgrade, display Blocker -> Upgrade edges since the
4625 # corresponding Blocker -> Uninstall edges will not be shown.
4627 self._dynamic_config._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
4628 if upgrade_node is not None and \
4629 uninstall not in executed_uninstalls:
4630 for blocker in uninstall_parents:
4631 mygraph.add(upgrade_node, blocker)
# Delegate to the unordered (DFS from graph roots) or ordered (merge-list
# order) tree builder, then prune redundant entries.
4633 if "--unordered-display" in self._frozen_config.myopts:
4634 display_list = self._unordered_tree_display(mygraph, mylist)
4636 display_list = self._ordered_tree_display(mygraph, mylist)
4638 self._prune_tree_display(display_list)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
# Depth-first traversal from the graph's root nodes, recording one
# (node, depth, True) entry per Package; seen_nodes (defined on an elided
# line) prevents revisiting shared subtrees.
4642 def _unordered_tree_display(self, mygraph, mylist):
4646 def print_node(node, depth):
4648 if node in seen_nodes:
4651 seen_nodes.add(node)
4653 if isinstance(node, Package):
4654 display_list.append((node, depth, True))
4658 for child_node in mygraph.child_nodes(node):
4659 print_node(child_node, depth + 1)
4661 for root_node in mygraph.root_nodes():
4662 print_node(root_node, 0)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
# Build the tree view while preserving merge-list order: tree_nodes acts as
# a stack of ancestors for the current chain, and add_parents() walks up to
# show (unordered) parents that pulled a node in, avoiding repeated edges.
4666 def _ordered_tree_display(self, mygraph, mylist):
# Pop ancestors until the top of the stack is a graph parent of x.
4673 depth = len(tree_nodes)
4674 while depth and x not in \
4675 mygraph.child_nodes(tree_nodes[depth-1]):
4678 tree_nodes = tree_nodes[:depth]
4679 tree_nodes.append(x)
4680 display_list.append((x, depth, True))
4681 shown_edges.add((x, tree_nodes[depth-1]))
4683 traversed_nodes = set() # prevent endless circles
4684 traversed_nodes.add(x)
4685 def add_parents(current_node, ordered):
4687 # Do not traverse to parents if this node is an
4688 # argument or a direct member of a set that has
4689 # been specified as an argument (system or world).
4690 if current_node not in self._dynamic_config._set_nodes:
4691 parent_nodes = mygraph.parent_nodes(current_node)
4693 child_nodes = set(mygraph.child_nodes(current_node))
4694 selected_parent = None
4695 # First, try to avoid a direct cycle.
4696 for node in parent_nodes:
4697 if not isinstance(node, (Blocker, Package)):
4699 if node not in traversed_nodes and \
4700 node not in child_nodes:
4701 edge = (current_node, node)
4702 if edge in shown_edges:
4704 selected_parent = node
4706 if not selected_parent:
4707 # A direct cycle is unavoidable.
4708 for node in parent_nodes:
4709 if not isinstance(node, (Blocker, Package)):
4711 if node not in traversed_nodes:
4712 edge = (current_node, node)
4713 if edge in shown_edges:
4715 selected_parent = node
# Recurse upward first so ancestors appear before the current
# node; entries added here are marked unordered (ordered=False).
4718 shown_edges.add((current_node, selected_parent))
4719 traversed_nodes.add(selected_parent)
4720 add_parents(selected_parent, False)
4721 display_list.append((current_node,
4722 len(tree_nodes), ordered))
4723 tree_nodes.append(current_node)
4725 add_parents(x, True)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
# Post-process the tree display list in place, walking backwards: drop
# consecutive duplicates at depth 0 and (per the elided deletion branches)
# entries not needed to connect the remaining merge nodes.
4729 def _prune_tree_display(self, display_list):
4730 last_merge_depth = 0
4731 for i in range(len(display_list) - 1, -1, -1):
4732 node, depth, ordered = display_list[i]
4733 if not ordered and depth == 0 and i > 0 \
4734 and node == display_list[i-1][0] and \
4735 display_list[i-1][1] == 0:
4736 # An ordered node got a consecutive duplicate
4737 # when the tree was being filled in.
4740 if ordered and isinstance(node, Package) \
4741 and node.operation == 'merge':
4742 last_merge_depth = depth
4744 if depth >= last_merge_depth or \
4745 i < len(display_list) - 1 and \
4746 depth >= display_list[i+1][1]:
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt (including this method's docstring
# quote delimiters); code kept verbatim, comments only.
4749 def display_problems(self):
4751 Display problems with the dependency graph such as slot collisions.
4752 This is called internally by display() to show the problems _after_
4753 the merge list where it is most likely to be seen, but if display()
4754 is not going to be called then this method should be called explicitly
4755 to ensure that the user is notified of problems with the graph.
4757 All output goes to stderr, except for unsatisfied dependencies which
4758 go to stdout for parsing by programs such as autounmask.
4761 # Note that show_masked_packages() sends its output to
4762 # stdout, and some programs such as autounmask parse the
4763 # output in cases when emerge bails out. However, when
4764 # show_masked_packages() is called for installed packages
4765 # here, the message is a warning that is more appropriate
4766 # to send to stderr, so temporarily redirect stdout to
4767 # stderr. TODO: Fix output code so there's a cleaner way
4768 # to redirect everything to stderr.
# The stdout swap is presumably restored in an elided try/finally.
4773 sys.stdout = sys.stderr
4774 self._display_problems()
4780 # This goes to stdout for parsing by programs like autounmask.
4781 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
4782 self._show_unsatisfied_dep(*pargs, **kwargs)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
# Report, in priority order: circular deps, unsatisfied blockers or slot
# collisions, missed updates, world-file problems, package.provided
# conflicts, and masked (license/installed) packages.
4784 def _display_problems(self):
4785 if self._dynamic_config._circular_deps_for_display is not None:
4786 self._show_circular_deps(
4787 self._dynamic_config._circular_deps_for_display)
4789 # The user is only notified of a slot conflict if
4790 # there are no unresolvable blocker conflicts.
4791 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
4792 self._show_unsatisfied_blockers(
4793 self._dynamic_config._unsatisfied_blockers_for_display)
4794 elif self._dynamic_config._slot_collision_info:
4795 self._show_slot_collision_notice()
4797 self._show_missed_update()
4799 # TODO: Add generic support for "set problem" handlers so that
4800 # the below warnings aren't special cases for world only.
4802 if self._dynamic_config._missing_args:
4803 world_problems = False
4804 if "world" in self._dynamic_config._sets:
4805 # Filter out indirect members of world (from nested sets)
4806 # since only direct members of world are desired here.
4807 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
4808 for arg, atom in self._dynamic_config._missing_args:
4809 if arg.name in ("selected", "world") and atom in world_set:
4810 world_problems = True
4814 sys.stderr.write("\n!!! Problems have been " + \
4815 "detected with your world file\n")
4816 sys.stderr.write("!!! Please run " + \
4817 green("emaint --check world")+"\n\n")
4819 if self._dynamic_config._missing_args:
4820 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4821 " Ebuilds for the following packages are either all\n")
4822 sys.stderr.write(colorize("BAD", "!!!") + \
4823 " masked or don't exist:\n")
4824 sys.stderr.write(" ".join(str(atom) for arg, atom in \
4825 self._dynamic_config._missing_args) + "\n")
# package.provided conflicts: group the requesting args/atoms and
# report which sets pulled each one in.
4827 if self._dynamic_config._pprovided_args:
4829 for arg, atom in self._dynamic_config._pprovided_args:
4830 if isinstance(arg, SetArg):
4832 arg_atom = (atom, atom)
4835 arg_atom = (arg.arg, atom)
4836 refs = arg_refs.setdefault(arg_atom, [])
4837 if parent not in refs:
4840 msg.append(bad("\nWARNING: "))
4841 if len(self._dynamic_config._pprovided_args) > 1:
4842 msg.append("Requested packages will not be " + \
4843 "merged because they are listed in\n")
4845 msg.append("A requested package will not be " + \
4846 "merged because it is listed in\n")
4847 msg.append("package.provided:\n\n")
4848 problems_sets = set()
4849 for (arg, atom), refs in arg_refs.items():
4852 problems_sets.update(refs)
4854 ref_string = ", ".join(["'%s'" % name for name in refs])
4855 ref_string = " pulled in by " + ref_string
4856 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
4858 if "selected" in problems_sets or "world" in problems_sets:
4859 msg.append("This problem can be solved in one of the following ways:\n\n")
4860 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
4861 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
4862 msg.append(" C) Remove offending entries from package.provided.\n\n")
4863 msg.append("The best course of action depends on the reason that an offending\n")
4864 msg.append("package.provided entry exists.\n\n")
4865 sys.stderr.write("".join(msg))
# Updates blocked by LICENSE changes.
4867 masked_packages = []
4868 for pkg in self._dynamic_config._masked_license_updates:
4869 root_config = pkg.root_config
4870 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4871 mreasons = get_masking_status(pkg, pkgsettings, root_config)
4872 masked_packages.append((root_config, pkgsettings,
4873 pkg.cpv, pkg.metadata, mreasons))
4875 writemsg("\n" + colorize("BAD", "!!!") + \
4876 " The following updates are masked by LICENSE changes:\n",
4878 show_masked_packages(masked_packages)
4880 writemsg("\n", noiselevel=-1)
# Installed packages that have since become masked.
4882 masked_packages = []
4883 for pkg in self._dynamic_config._masked_installed:
4884 root_config = pkg.root_config
4885 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4886 mreasons = get_masking_status(pkg, pkgsettings, root_config)
4887 masked_packages.append((root_config, pkgsettings,
4888 pkg.cpv, pkg.metadata, mreasons))
4890 writemsg("\n" + colorize("BAD", "!!!") + \
4891 " The following installed packages are masked:\n",
4893 show_masked_packages(masked_packages)
4895 writemsg("\n", noiselevel=-1)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
4897 def saveNomergeFavorites(self):
4898 """Find atoms in favorites that are not in the mergelist and add them
4899 to the world file if necessary."""
# No-op for modes that must not modify the world file.
4900 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
4901 "--oneshot", "--onlydeps", "--pretend"):
4902 if x in self._frozen_config.myopts:
4904 root_config = self._frozen_config.roots[self._frozen_config.target_root]
4905 world_set = root_config.sets["selected"]
# Lock the world set if the backend supports it (lock call elided).
4907 world_locked = False
4908 if hasattr(world_set, "lock"):
4912 if hasattr(world_set, "load"):
4913 world_set.load() # maybe it's changed on disk
4915 args_set = self._dynamic_config._sets["args"]
4916 portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
4917 added_favorites = set()
# Collect world atoms for argument packages that ended up "nomerge".
4918 for x in self._dynamic_config._set_nodes:
4919 pkg_type, root, pkg_key, pkg_status = x
4920 if pkg_status != "nomerge":
4924 myfavkey = create_world_atom(x, args_set, root_config)
4926 if myfavkey in added_favorites:
4928 added_favorites.add(myfavkey)
4929 except portage.exception.InvalidDependString as e:
4930 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
4931 (pkg_key, str(e)), noiselevel=-1)
4932 writemsg("!!! see '%s'\n\n" % os.path.join(
4933 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (as @set entries), skipping internal sets
# and sets that are not world_candidate.
4936 for k in self._dynamic_config._sets:
4937 if k in ("args", "selected", "world") or \
4938 not root_config.sets[k].world_candidate:
4943 all_added.append(SETPREFIX + k)
4944 all_added.extend(added_favorites)
4947 print(">>> Recording %s in \"world\" favorites file..." % \
4948 colorize("INFORM", str(a)))
4950 world_set.update(all_added)
# NOTE(review): elided numbered listing — gaps in the embedded line numbers
# are lines missing from this excerpt; code kept verbatim, comments only.
4955 def _loadResumeCommand(self, resume_data, skip_masked=True,
4958 Add a resume command to the graph and validate it in the process. This
4959 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before using it.
4964 if not isinstance(resume_data, dict):
4967 mergelist = resume_data.get("mergelist")
4968 if not isinstance(mergelist, list):
4971 fakedb = self._dynamic_config.mydbapi
4972 trees = self._frozen_config.trees
4973 serialized_tasks = []
# Rebuild Package instances from the serialized 4-tuples, skipping
# malformed entries and non-merge actions.
4976 if not (isinstance(x, list) and len(x) == 4):
4978 pkg_type, myroot, pkg_key, action = x
4979 if pkg_type not in self.pkg_tree_map:
4981 if action != "merge":
4983 root_config = self._frozen_config.roots[myroot]
4985 pkg = self._pkg(pkg_key, pkg_type, root_config)
4986 except portage.exception.PackageNotFound:
4987 # It does not exist or it is corrupt.
4989 # TODO: log these somewhere
# Packages that have become invisible (masked) since the resume
# list was saved are either skipped or reported, per skip_masked.
4993 if "merge" == pkg.operation and \
4994 not visible(root_config.settings, pkg):
4996 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
4998 self._dynamic_config._unsatisfied_deps_for_display.append(
4999 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
5001 fakedb[myroot].cpv_inject(pkg)
5002 serialized_tasks.append(pkg)
5003 self._spinner_update()
5005 if self._dynamic_config._unsatisfied_deps_for_display:
5008 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
5009 self._dynamic_config._serialized_tasks_cache = serialized_tasks
5010 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
5012 self._select_package = self._select_pkg_from_graph
5013 self._dynamic_config.myparams["selective"] = True
5014 # Always traverse deep dependencies in order to account for
5015 # potentially unsatisfied dependencies of installed packages.
5016 # This is necessary for correct --keep-going or --resume operation
5017 # in case a package from a group of circularly dependent packages
5018 # fails. In this case, a package which has recently been installed
5019 # may have an unsatisfied circular dependency (pulled in by
5020 # PDEPEND, for example). So, even though a package is already
5021 # installed, it may not have all of it's dependencies satisfied, so
5022 # it may not be usable. If such a package is in the subgraph of
5023 # deep dependencies of a scheduled build, that build needs to
5024 # be cancelled. In order for this type of situation to be
5025 # recognized, deep traversal of dependencies is required.
5026 self._dynamic_config.myparams["deep"] = True
5028 favorites = resume_data.get("favorites")
5029 args_set = self._dynamic_config._sets["args"]
5030 if isinstance(favorites, list):
5031 args = self._load_favorites(favorites)
# Re-add the resumed merge tasks to the graph.
5035 for task in serialized_tasks:
5036 if isinstance(task, Package) and \
5037 task.operation == "merge":
5038 if not self._add_pkg(task, None):
5041 # Packages for argument atoms need to be explicitly
5042 # added via _add_pkg() so that they are included in the
5043 # digraph (needed at least for --tree display).
5045 for atom in arg.set:
5046 pkg, existing_node = self._select_package(
5047 arg.root_config.root, atom)
5048 if existing_node is None and \
5050 if not self._add_pkg(pkg, Dependency(atom=atom,
5051 root=pkg.root, parent=arg)):
5054 # Allow unsatisfied deps here to avoid showing a masking
5055 # message for an unsatisfied dep that isn't necessarily
5057 if not self._create_graph(allow_unsatisfied=True):
# Only unsatisfied deps whose parent is a package scheduled for
# merge invalidate the resume list directly.
5060 unsatisfied_deps = []
5061 for dep in self._dynamic_config._unsatisfied_deps:
5062 if not isinstance(dep.parent, Package):
5064 if dep.parent.operation == "merge":
5065 unsatisfied_deps.append(dep)
5068 # For unsatisfied deps of installed packages, only account for
5069 # them if they are in the subgraph of dependencies of a package
5070 # which is scheduled to be installed.
5071 unsatisfied_install = False
5073 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
5075 node = dep_stack.pop()
5076 if not isinstance(node, Package):
5078 if node.operation == "merge":
5079 unsatisfied_install = True
5081 if node in traversed:
5084 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
5086 if unsatisfied_install:
5087 unsatisfied_deps.append(dep)
5089 if masked_tasks or unsatisfied_deps:
5090 # This probably means that a required package
5091 # was dropped via --skipfirst. It makes the
5092 # resume list invalid, so convert it to a
5093 # UnsatisfiedResumeDep exception.
5094 raise self.UnsatisfiedResumeDep(self,
5095 masked_tasks + unsatisfied_deps)
5096 self._dynamic_config._serialized_tasks_cache = None
5099 except self._unknown_internal_error:
5104 def _load_favorites(self, favorites):
5106 Use a list of favorites to resume state from a
5107 previous select_files() call. This creates similar
5108 DependencyArg instances to those that would have
5109 been created by the original select_files() call.
5110 This allows Package instances to be matched with
5111 DependencyArg instances during graph creation.
# NOTE(review): numbering gaps show several original lines are elided from
# this view (docstring quotes, loop header, 'continue' guards, return).
# Resolve the target root's configuration and its set machinery once.
5113 root_config = self._frozen_config.roots[self._frozen_config.target_root]
5114 getSetAtoms = root_config.setconfig.getSetAtoms
5115 sets = root_config.sets
# Skip any non-string entries found in the favorites list.
5118 if not isinstance(x, basestring):
# "system" and "world" are special-cased; they are not re-added here.
5120 if x in ("system", "world"):
# A SETPREFIX-prefixed favorite names a package set rather than an atom.
5122 if x.startswith(SETPREFIX):
5123 s = x[len(SETPREFIX):]
5126 if s in self._dynamic_config._sets:
5128 # Recursively expand sets so that containment tests in
5129 # self._get_parent_sets() properly match atoms in nested
5130 # sets (like if world contains system).
5131 expanded_set = InternalPackageSet(
5132 initial_atoms=getSetAtoms(s))
5133 self._dynamic_config._sets[s] = expanded_set
5134 args.append(SetArg(arg=x, set=expanded_set,
5135 root_config=root_config))
# Anything else is treated as a plain package atom; an InvalidAtom is
# caught so a stale favorites entry cannot abort the resume.
5139 except portage.exception.InvalidAtom:
5141 args.append(AtomArg(arg=x, atom=x,
5142 root_config=root_config))
# Register the reconstructed args so graph creation can match Package
# instances against them.
5144 self._set_args(args)
5147 class UnsatisfiedResumeDep(portage.exception.PortageException):
5149 A dependency of a resume list is not installed. This
5150 can occur when a required package is dropped from the
5151 merge list via --skipfirst.
# Keep a reference to the depgraph so the handler that catches this
# exception (see _resume_depgraph) can inspect the graph state.
5153 def __init__(self, depgraph, value):
5154 portage.exception.PortageException.__init__(self, value)
5155 self.depgraph = depgraph
# Base class for exceptions used only for internal depgraph control flow.
5157 class _internal_exception(portage.exception.PortageException):
5158 def __init__(self, value=""):
5159 portage.exception.PortageException.__init__(self, value)
5161 class _unknown_internal_error(_internal_exception):
# NOTE(review): the docstring quotes around the following description and
# its final sentence are elided from this view.
5163 Used by the depgraph internally to terminate graph creation.
5164 The specific reason for the failure should have been dumped
5165 to stderr, unfortunately, the exact reason for the failure
# Control-flow signal: callers retry _serialize_tasks() when this is raised.
5169 class _serialize_tasks_retry(_internal_exception):
5171 This is raised by the _serialize_tasks() method when it needs to
5172 be called again for some reason. The only case that it's currently
5173 used for is when neglected dependencies need to be added to the
5174 graph in order to avoid making a potentially unsafe decision.
5177 class _backtrack_mask(_internal_exception):
5179 This is raised by _show_unsatisfied_dep() when it's called with
5180 check_backtrack=True and a matching package has been masked by
# NOTE(review): the rest of this docstring (and its quotes) is elided here.
def need_restart(self):
	"""Report whether graph creation flagged that a restart is needed.

	Used by _backtrack_depgraph() to decide whether to attempt another
	backtracking pass.
	"""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_restart
def get_runtime_pkg_mask(self):
	"""Return a shallow copy of the runtime package mask mapping.

	A copy is returned so the caller cannot mutate the depgraph's
	internal state.
	"""
	runtime_mask = self._dynamic_config._runtime_pkg_mask
	return runtime_mask.copy()
5190 class _dep_check_composite_db(portage.dbapi):
5192 A dbapi-like interface that is optimized for use in dep_check() calls.
5193 This is built on top of the existing depgraph package selection logic.
5194 Some packages that have been added to the graph may be masked from this
5195 view in order to influence the atom preference selection that occurs
# NOTE(review): numbering gaps show several original lines are elided from
# this view (docstring quotes, guards, early returns, the assignment of
# self._root, etc.).
5198 def __init__(self, depgraph, root):
5199 portage.dbapi.__init__(self)
5200 self._depgraph = depgraph
# Per-atom caches; dropped via _clear_cache() when graph state changes.
5202 self._match_cache = {}
5203 self._cpv_pkg_map = {}
5205 def _clear_cache(self):
# Forget all memoized match results and cpv -> Package mappings.
5206 self._match_cache.clear()
5207 self._cpv_pkg_map.clear()
5209 def match(self, atom):
# Look up a previously memoized result for this atom first.
5210 ret = self._match_cache.get(atom)
5215 atom = self._dep_expand(atom)
5216 pkg, existing = self._depgraph._select_package(self._root, atom)
5220 # Return the highest available from select_package() as well as
5221 # any matching slots in the graph db.
5223 slots.add(pkg.metadata["SLOT"])
5224 if pkg.cp.startswith("virtual/"):
5225 # For new-style virtual lookahead that occurs inside
5226 # dep_check(), examine all slots. This is needed
5227 # so that newer slots will not unnecessarily be pulled in
5228 # when a satisfying lower slot is already installed. For
5229 # example, if virtual/jdk-1.4 is satisfied via kaffe then
5230 # there's no need to pull in a newer slot to satisfy a
5231 # virtual/jdk dependency.
5232 for db, pkg_type, built, installed, db_keys in \
5233 self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
5234 for cpv in db.match(atom):
5235 if portage.cpv_getkey(cpv) != pkg.cp:
5237 slots.add(db.aux_get(cpv, ["SLOT"])[0])
# Record the selected package when it passes the visibility filter.
5239 if self._visible(pkg):
5240 self._cpv_pkg_map[pkg.cpv] = pkg
5242 slots.remove(pkg.metadata["SLOT"])
# Select one package per remaining slot via a slot-specific atom.
5244 slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
5245 pkg, existing = self._depgraph._select_package(
5246 self._root, slot_atom)
5249 if not self._visible(pkg):
5251 self._cpv_pkg_map[pkg.cpv] = pkg
# Sort ascending and memoize under the originally requested atom.
5254 self._cpv_sort_ascending(ret)
5255 self._match_cache[orig_atom] = ret
5258 def _visible(self, pkg):
# Installed packages are hidden unless "selective" behavior is enabled.
5259 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
5261 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
5262 except (StopIteration, portage.exception.InvalidDependString):
5269 self._depgraph._frozen_config.pkgsettings[pkg.root], pkg):
5271 except portage.exception.InvalidDependString:
# Compare against whatever package already occupies this slot in the
# graph, if any.
5273 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
5274 self._root].get(pkg.slot_atom)
5275 if in_graph is None:
5276 # Mask choices for packages which are not the highest visible
5277 # version within their slot (since they usually trigger slot
5279 highest_visible, in_graph = self._depgraph._select_package(
5280 self._root, pkg.slot_atom)
5281 if pkg != highest_visible:
5283 elif in_graph != pkg:
5284 # Mask choices for packages that would trigger a slot
5285 # conflict with a previously selected package.
5289 def _dep_expand(self, atom):
5291 This is only needed for old installed packages that may
5292 contain atoms that are not fully qualified with a specific
5293 category. Emulate the cpv_expand() function that's used by
5294 dbapi.match() in cases like this. If there are multiple
5295 matches, it's often due to a new-style virtual that has
5296 been added, so try to filter those out to avoid raising
5299 root_config = self._depgraph.roots[self._root]
5301 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5302 if len(expanded_atoms) > 1:
# Prefer the single non-virtual expansion when exactly one exists.
5303 non_virtual_atoms = []
5304 for x in expanded_atoms:
5305 if not portage.dep_getkey(x).startswith("virtual/"):
5306 non_virtual_atoms.append(x)
5307 if len(non_virtual_atoms) == 1:
5308 expanded_atoms = non_virtual_atoms
5309 if len(expanded_atoms) > 1:
5310 # compatible with portage.cpv_expand()
5311 raise portage.exception.AmbiguousPackageName(
5312 [portage.dep_getkey(x) for x in expanded_atoms])
5314 atom = expanded_atoms[0]
# No expansion matched: place the name in a "null" category and consult
# the virtuals table. NOTE(review): virts_p presumably maps the package
# name to providing virtuals; the guard that uses it is elided here.
5316 null_atom = Atom(insert_category_into_atom(atom, "null"))
5317 cat, atom_pn = portage.catsplit(null_atom.cp)
5318 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5320 # Allow the resolver to choose which virtual.
5321 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
5326 def aux_get(self, cpv, wants):
# Serve metadata from the Package instances cached by match();
# missing keys default to the empty string.
5327 metadata = self._cpv_pkg_map[cpv].metadata
5328 return [metadata.get(x, "") for x in wants]
5330 def match_pkgs(self, atom):
# Like match(), but returns Package instances instead of cpv strings.
5331 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
5333 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
# In quiet mode just list the fully-qualified candidates; otherwise run
# the emerge search machinery (below) to describe each candidate.
5335 if "--quiet" in myopts:
5336 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5337 print("!!! one of the following fully-qualified ebuild names instead:\n")
5338 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5339 print(" " + colorize("INFORM", cp))
# Verbose path: reuse emerge's search to show details for each match.
5342 s = search(root_config, spinner, "--searchdesc" in myopts,
5343 "--quiet" not in myopts, "--usepkg" in myopts,
5344 "--usepkgonly" in myopts)
5345 null_cp = portage.dep_getkey(insert_category_into_atom(
5347 cat, atom_pn = portage.catsplit(null_cp)
5348 s.searchkey = atom_pn
5349 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5352 print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
5353 print("!!! one of the above fully-qualified ebuild names instead.\n")
5355 def insert_category_into_atom(atom, category):
# Find the first word character: the category is inserted just before it
# so that any leading operator prefix (e.g. ">=") is preserved.
5356 alphanum = re.search(r'\w', atom)
5358 ret = atom[:alphanum.start()] + "%s/" % category + \
5359 atom[alphanum.start():]
5364 def _spinner_start(spinner, myopts):
# For pretend/ask/tree/verbose runs, print the banner describing what
# the following package list means (ordering depends on display mode).
5367 if "--quiet" not in myopts and \
5368 ("--pretend" in myopts or "--ask" in myopts or \
5369 "--tree" in myopts or "--verbose" in myopts):
5371 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
5373 elif "--buildpkgonly" in myopts:
5377 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
5378 if "--unordered-display" in myopts:
5379 portage.writemsg_stdout("\n" + \
5380 darkgreen("These are the packages that " + \
5381 "would be %s:" % action) + "\n\n")
5383 portage.writemsg_stdout("\n" + \
5384 darkgreen("These are the packages that " + \
5385 "would be %s, in reverse order:" % action) + "\n\n")
5387 portage.writemsg_stdout("\n" + \
5388 darkgreen("These are the packages that " + \
5389 "would be %s, in order:" % action) + "\n\n")
# Silence the spinner under --quiet/--nodeps; otherwise announce the
# dependency calculation that is about to run.
5391 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
5392 if not show_spinner:
5393 spinner.update = spinner.update_quiet
5396 portage.writemsg_stdout("Calculating dependencies ")
5398 def _spinner_stop(spinner):
# Nothing to finish when there is no spinner or it was silenced by
# _spinner_start() (update_quiet swapped in for update).
5399 if spinner is None or \
5400 spinner.update is spinner.update_quiet:
# Erase the spinner glyph and terminate the progress line.
5403 portage.writemsg_stdout("\b\b... done!\n")
5405 def backtrack_depgraph(settings, trees, myopts, myparams,
5406 myaction, myfiles, spinner):
5408 Raises PackageSetNotFound if myfiles contains a missing package set.
# Thin wrapper: pair spinner start/stop around the real computation.
5410 _spinner_start(spinner, myopts)
5412 return _backtrack_depgraph(settings, trees, myopts, myparams,
5413 myaction, myfiles, spinner)
5415 _spinner_stop(spinner)
5417 def _backtrack_depgraph(settings, trees, myopts, myparams,
5418 myaction, myfiles, spinner):
# Repeatedly build the depgraph, carrying a runtime package mask between
# attempts, until it succeeds or the --backtrack budget is exhausted.
5420 backtrack_max = myopts.get('--backtrack', 5)
5421 runtime_pkg_mask = None
5422 allow_backtracking = backtrack_max > 0
5424 frozen_config = _frozen_depgraph_config(settings, trees,
5427 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5428 frozen_config=frozen_config,
5429 allow_backtracking=allow_backtracking,
5430 runtime_pkg_mask=runtime_pkg_mask)
5431 success, favorites = mydepgraph.select_files(myfiles)
# On a restart request, take the accumulated mask into the next attempt.
5433 if mydepgraph.need_restart() and backtracked < backtrack_max:
5434 runtime_pkg_mask = mydepgraph.get_runtime_pkg_mask()
5436 elif backtracked and allow_backtracking:
5437 if "--debug" in myopts:
5439 "\n\nbacktracking aborted after %s tries\n\n" % \
5440 backtracked, noiselevel=-1, level=logging.DEBUG)
5441 # Backtracking failed, so disable it and do
5442 # a plain dep calculation + error message.
5443 allow_backtracking = False
5444 runtime_pkg_mask = None
5449 return (success, mydepgraph, favorites)
5451 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5453 Raises PackageSetNotFound if myfiles contains a missing package set.
# Thin wrapper: same spinner start/stop pairing as backtrack_depgraph().
5455 _spinner_start(spinner, myopts)
5457 return _resume_depgraph(settings, trees, mtimedb, myopts,
5460 _spinner_stop(spinner)
5462 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5464 Construct a depgraph for the given resume list. This will raise
5465 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
5466 TODO: Return reasons for dropped_tasks, for display/logging.
5468 @returns: (success, depgraph, dropped_tasks)
# NOTE(review): numbering gaps show several original lines are elided
# from this view (the retry loop header, 'continue' guards, 'raise').
5471 skip_unsatisfied = True
5472 mergelist = mtimedb["resume"]["mergelist"]
5473 dropped_tasks = set()
5474 frozen_config = _frozen_depgraph_config(settings, trees,
5477 mydepgraph = depgraph(settings, trees,
5478 myopts, myparams, spinner, frozen_config=frozen_config)
5480 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
5481 skip_masked=skip_masked)
5482 except depgraph.UnsatisfiedResumeDep as e:
5483 if not skip_unsatisfied:
# Walk upward from each unsatisfied dep's parent, collecting every
# merge/nomerge ancestor whose deps would break if that node is dropped.
5486 graph = mydepgraph._dynamic_config.digraph
5487 unsatisfied_parents = dict((dep.parent, dep.parent) \
5489 traversed_nodes = set()
5490 unsatisfied_stack = list(unsatisfied_parents)
5491 while unsatisfied_stack:
5492 pkg = unsatisfied_stack.pop()
5493 if pkg in traversed_nodes:
5495 traversed_nodes.add(pkg)
5497 # If this package was pulled in by a parent
5498 # package scheduled for merge, removing this
5499 # package may cause the parent package's
5500 # dependency to become unsatisfied.
5501 for parent_node in graph.parent_nodes(pkg):
5502 if not isinstance(parent_node, Package) \
5503 or parent_node.operation not in ("merge", "nomerge"):
5506 graph.child_nodes(parent_node,
5507 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5508 if pkg in unsatisfied:
5509 unsatisfied_parents[parent_node] = parent_node
5510 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied parents.
5512 pruned_mergelist = []
5514 if isinstance(x, list) and \
5515 tuple(x) not in unsatisfied_parents:
5516 pruned_mergelist.append(x)
5518 # If the mergelist doesn't shrink then this loop is infinite.
5519 if len(pruned_mergelist) == len(mergelist):
5520 # This happens if a package can't be dropped because
5521 # it's already installed, but it has unsatisfied PDEPEND.
5523 mergelist[:] = pruned_mergelist
5525 # Exclude installed packages that have been removed from the graph due
5526 # to failure to build/install runtime dependencies after the dependent
5527 # package has already been installed.
5528 dropped_tasks.update(pkg for pkg in \
5529 unsatisfied_parents if pkg.operation != "nomerge")
# Break references so the discarded graph objects can be collected.
5530 mydepgraph.break_refs(unsatisfied_parents)
5532 del e, graph, traversed_nodes, \
5533 unsatisfied_parents, unsatisfied_stack
5537 return (success, mydepgraph, dropped_tasks)
5539 def get_mask_info(root_config, cpv, pkgsettings,
5540 db, pkg_type, built, installed, db_keys):
# Fetch cpv's metadata from db; a None metadata is reported below as
# "corruption".
5543 metadata = dict(zip(db_keys,
5544 db.aux_get(cpv, db_keys)))
5548 if metadata is None:
5549 mreasons = ["corruption"]
5551 eapi = metadata['EAPI']
# An unsupported EAPI is itself the masking reason; otherwise build a
# Package and ask the regular masking machinery.
5554 if not portage.eapi_is_supported(eapi):
5555 mreasons = ['EAPI %s' % eapi]
5557 pkg = Package(type_name=pkg_type, root_config=root_config,
5558 cpv=cpv, built=built, installed=installed, metadata=metadata)
5559 mreasons = get_masking_status(pkg, pkgsettings, root_config)
5560 return metadata, mreasons
5562 def show_masked_packages(masked_packages):
# Track what has already been printed so licenses, mask-file comments,
# and duplicate cpvs (e.g. ebuild + binary) appear only once.
5563 shown_licenses = set()
5564 shown_comments = set()
5565 # Maybe there is both an ebuild and a binary. Only
5566 # show one of them to avoid redundant appearance.
5568 have_eapi_mask = False
5569 for (root_config, pkgsettings, cpv,
5570 metadata, mreasons) in masked_packages:
5571 if cpv in shown_cpvs:
5574 comment, filename = None, None
# For package.mask entries, pull the mask-file comment and its location.
5575 if "package.mask" in mreasons:
5576 comment, filename = \
5577 portage.getmaskingreason(
5578 cpv, metadata=metadata,
5579 settings=pkgsettings,
5580 portdb=root_config.trees["porttree"].dbapi,
5581 return_location=True)
5582 missing_licenses = []
5584 if not portage.eapi_is_supported(metadata["EAPI"]):
5585 have_eapi_mask = True
5587 missing_licenses = \
5588 pkgsettings._getMissingLicenses(
5590 except portage.exception.InvalidDependString:
5591 # This will have already been reported
5592 # above via mreasons.
5595 print("- "+cpv+" (masked by: "+", ".join(mreasons)+")")
5597 if comment and comment not in shown_comments:
5598 writemsg_stdout(filename + ":\n" + comment + "\n",
5600 shown_comments.add(comment)
5601 portdb = root_config.trees["porttree"].dbapi
# Point the user at a local copy of each missing license, once each.
5602 for l in missing_licenses:
5603 l_path = portdb.findLicensePath(l)
5604 if l in shown_licenses:
5606 msg = ("A copy of the '%s' license" + \
5607 " is located at '%s'.") % (l, l_path)
5610 shown_licenses.add(l)
# Tell the caller whether an EAPI mask was seen so it can print extra
# guidance.
5611 return have_eapi_mask
def show_mask_docs():
	"""Print a pointer to documentation about masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	)
	for doc_line in doc_lines:
		print(doc_line)
5617 def filter_iuse_defaults(iuse):
# Strip IUSE default markers: a leading "+" or "-" on a flag.
5619 if flag.startswith("+") or flag.startswith("-"):
5624 def show_blocker_docs_link():
# Direct the user to the handbook section about blocked packages.
5626 print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
5627 print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
5629 print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
5632 def get_masking_status(pkg, pkgsettings, root_config):
5634 mreasons = portage.getmaskingstatus(
5635 pkg, settings=pkgsettings,
5636 portdb=root_config.trees["porttree"].dbapi)
5638 if not pkg.installed:
5639 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5640 mreasons.append("CHOST: %s" % \
5641 pkg.metadata["CHOST"])
5643 for msg_type, msgs in pkg.invalid.items():
5645 mreasons.append("invalid: %s" % (msg,))
5647 if not pkg.metadata["SLOT"]:
5648 mreasons.append("invalid: SLOT is undefined")