1 # Copyright 1999-2009 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
5 from __future__ import print_function
12 from itertools import chain
15 from portage import os
16 from portage import digraph
17 from portage.dep import Atom
18 from portage.output import bold, blue, colorize, create_color_func, darkblue, \
19 darkgreen, green, nc_len, red, teal, turquoise, yellow
20 bad = create_color_func("BAD")
21 from portage._sets import SETPREFIX
22 from portage._sets.base import InternalPackageSet
23 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
24 from portage.util import writemsg_level
26 from _emerge.AtomArg import AtomArg
27 from _emerge.Blocker import Blocker
28 from _emerge.BlockerCache import BlockerCache
29 from _emerge.BlockerDepPriority import BlockerDepPriority
30 from _emerge.changelog import calc_changelog
31 from _emerge.countdown import countdown
32 from _emerge.create_world_atom import create_world_atom
33 from _emerge.Dependency import Dependency
34 from _emerge.DependencyArg import DependencyArg
35 from _emerge.DepPriority import DepPriority
36 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
37 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
38 from _emerge.FakeVartree import FakeVartree
39 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
40 from _emerge.format_size import format_size
41 from _emerge.is_valid_package_atom import is_valid_package_atom
42 from _emerge.Package import Package
43 from _emerge.PackageArg import PackageArg
44 from _emerge.PackageCounters import PackageCounters
45 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
46 from _emerge.RepoDisplay import RepoDisplay
47 from _emerge.RootConfig import RootConfig
48 from _emerge.search import search
49 from _emerge.SetArg import SetArg
50 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
51 from _emerge.UnmergeDepPriority import UnmergeDepPriority
53 if sys.hexversion >= 0x3000000:
class _frozen_depgraph_config(object):
	# State that is computed once per emerge invocation and then shared
	# read-only by all depgraph instances (including the extra depgraphs
	# created during backtracking, which receive the same instance).
	#
	# NOTE(review): this excerpt is missing several lines of the original
	# method body; some names used below (self.trees, self.roots,
	# self._pkg_cache, myroot, the enclosing loop) are defined on lines
	# that are not visible here.

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		# ROOT is the offset at which packages will be merged.
		self.target_root = settings["ROOT"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		# root_config of the running root ("/").
		self._running_root = trees["/"]["root_config"]
		# Options that imply no packages are actually merged, so no
		# restart handling is needed for them.
		self._opts_no_restart = frozenset(["--buildpkgonly",
			"--fetchonly", "--fetch-all-uri", "--pretend"])
		# Keep a reference to the unmodified trees, since self.trees
		# below substitutes a FakeVartree for the real vartree.
		self._trees_orig = trees
		# All Package instances
		self._highest_license_masked = {}
		self.trees[myroot] = {}
		# Create a RootConfig instance that references
		# the FakeVartree instead of the real one.
		self.roots[myroot] = RootConfig(
			trees[myroot]["vartree"].settings,
			trees[myroot]["root_config"].setconfig)
		for tree in ("porttree", "bintree"):
			self.trees[myroot][tree] = trees[myroot][tree]
		# The FakeVartree lets the resolver model vdb state without
		# touching the real installed-package database.
		self.trees[myroot]["vartree"] = \
			FakeVartree(trees[myroot]["root_config"],
				pkg_cache=self._pkg_cache)
		self.pkgsettings[myroot] = portage.config(
			clone=self.trees[myroot]["vartree"].settings)
		# Sets that must always be satisfied by the graph.
		self._required_set_names = set(["world"])
class _dynamic_depgraph_config(object):
	# Mutable, per-resolution state. A fresh instance is created for each
	# depgraph (including each backtracking attempt), while the frozen
	# config is shared between them.
	#
	# NOTE(review): the signature below is truncated in this excerpt; a
	# trailing runtime_pkg_mask parameter is referenced in the body but
	# its line is not visible here.
	def __init__(self, depgraph, myparams, allow_backtracking,
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		#contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# contains all sets added to the graph
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		if runtime_pkg_mask is None:
			runtime_pkg_mask = {}
		# Copy the mask (and each value set) so this run can extend it
		# without mutating the caller's mapping.
		runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
			runtime_pkg_mask.items())
		self._runtime_pkg_mask = runtime_pkg_mask
		self._need_restart = False

		for myroot in depgraph._frozen_config.trees:
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			self.mydbapi[myroot] = fakedb
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
			bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in depgraph._frozen_config.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in depgraph._frozen_config.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			# With --usepkg, make sure binary package metadata is loaded
			# (and possibly fetched) before the resolver consults bindb.
			if "--usepkg" in depgraph._frozen_config.myopts:
				depgraph._frozen_config._trees_orig[myroot
					]["bintree"].populate(
					"--getbinpkg" in depgraph._frozen_config.myopts,
					"--getbinpkgonly" in depgraph._frozen_config.myopts)
class depgraph(object):
	# Dependency resolver: builds a digraph of packages to merge from the
	# given options/parameters and selects package versions to satisfy it.

	pkg_tree_map = RootConfig.pkg_tree_map

	# Dependency metadata keys consulted when traversing package deps.
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, runtime_pkg_mask=None, allow_backtracking=False):
		# A frozen_config may be passed in so that backtracking runs can
		# reuse the shared state instead of rebuilding it.
		if frozen_config is None:
			frozen_config = _frozen_depgraph_config(settings, trees,
		self._frozen_config = frozen_config
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, runtime_pkg_mask)

		# Default selection strategies: prefer the highest available
		# atom/package choices.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		# NOTE(review): the "def _load_vdb(self):" line and the docstring
		# delimiters are missing from this excerpt; the four lines below
		# are the body of that method's docstring.
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.

		# Only do the work once per dynamic config.
		if self._dynamic_config._vdb_loaded:

		for myroot in self._frozen_config.trees:

			# Skip preloading when deps won't be traversed anyway.
			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts and \
				"--buildpkgonly" not in self._frozen_config.myopts

			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.

			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi
				fakedb = self._dynamic_config._graph_trees[
					myroot]["vartree"].dbapi

					self._spinner_update()
					# This triggers metadata updates via FakeVartree.
					vardb.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

				# Now that the vardb state is cached in our FakeVartree,
				# we won't be needing the real vartree cache for awhile.
				# To make some room on the heap, clear the vardbapi
				self._frozen_config._trees_orig[myroot
					]["vartree"].dbapi._clear_cache()

		# Mark as loaded so subsequent calls return immediately.
		self._dynamic_config._vdb_loaded = True
300 def _spinner_update(self):
301 if self._frozen_config.spinner:
302 self._frozen_config.spinner.update()
	def _show_missed_update(self):
		"""Report updates that were skipped because they were added to the
		runtime package mask during backtracking, grouped by mask type."""

		# Stay silent with --quiet unless --debug was also given.
		if '--quiet' in self._frozen_config.myopts and \
			'--debug' not in self._frozen_config.myopts:

		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
			# Exclude installed here since we only
			# want to show available updates.
			# Key by (root, slot) so only one entry survives per slot.
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
			for mask_type, parent_atoms in mask_reasons.items():
				missed_updates[k] = (pkg, mask_type, parent_atoms)

		if not missed_updates:

		# Group the survivors by the kind of mask that caused the skip.
		missed_update_types = {}
		for pkg, mask_type, parent_atoms in missed_updates.values():
			missed_update_types.setdefault(mask_type,
				[]).append((pkg, parent_atoms))

		self._show_missed_update_slot_conflicts(
			missed_update_types.get("slot conflict"))

		self._show_missed_update_unsatisfied_dep(
			missed_update_types.get("missing dependency"))
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		"""Write to stderr the updates that were skipped because one of
		their dependencies could not be satisfied."""

		if not missed_updates:

		write = sys.stderr.write
		# Updates masked by backtracking are collected here and shown in
		# abbreviated form at the end.
		backtrack_masked = []

		for pkg, parent_atoms in missed_updates:
			# NOTE(review): a "try:" line is missing from this excerpt;
			# the except clause below pairs with it.
			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent,
					check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))

			write("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n")

			write(str(pkg.slot_atom))
			write(" for %s" % (pkg.root,))

			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)

		# These are shown in abbreviated form, in order to avoid terminal
		# flooding from mask messages as reported in bug #285832.
		write("\n!!! The following update(s) have been skipped " + \
			"due to unsatisfied dependencies\n" + \
			"!!! triggered by backtracking:\n\n")
		for pkg, parent_atoms in backtrack_masked:
			write(str(pkg.slot_atom))
			write(" for %s" % (pkg.root,))
	def _show_missed_update_slot_conflicts(self, missed_updates):
		"""Write to stderr the updates that were skipped because of a slot
		conflict, listing the parent atoms that pulled each package in."""

		if not missed_updates:

		msg.append("\n!!! One or more updates have been skipped due to " + \
			"a dependency conflict:\n\n")

		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			msg.append(" for %s" % (pkg.root,))

			for parent, atom in parent_atoms:
				msg.append(" conflicts with\n")
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))

		sys.stderr.write("".join(msg))
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible

		if not self._dynamic_config._slot_collision_info:

		self._show_merge_list()

		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")

		# Max number of parents shown, to avoid flooding the display.
		explanation_columns = 70
		for (slot_atom, root), slot_nodes \
			in self._dynamic_config._slot_collision_info.items():
			msg.append(str(slot_atom))
			msg.append(" for %s" % (root,))

			for node in slot_nodes:
				msg.append(str(node))
				parent_atoms = self._dynamic_config._parent_atoms.get(node)
				# Prefer conflict atoms over others.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					if parent_atom in self._dynamic_config._slot_conflict_parent_atoms:
						pruned_list.add(parent_atom)
				# If this package was pulled in by conflict atoms then
				# show those alone since those are the most interesting.
				# When generating the pruned list, prefer instances
				# of DependencyArg over instances of Package.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if isinstance(parent, DependencyArg):
						pruned_list.add(parent_atom)
				# Prefer Packages instances that themselves have been
				# pulled into collision slots.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if isinstance(parent, Package) and \
						(parent.slot_atom, parent.root) \
						in self._dynamic_config._slot_collision_info:
						pruned_list.add(parent_atom)
				# Finally, fill any remaining display slots with whatever
				# parent atoms are left.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					pruned_list.add(parent_atom)
				omitted_parents = len(parent_atoms) - len(pruned_list)
				parent_atoms = pruned_list
				msg.append(" pulled in by\n")
				for parent_atom in parent_atoms:
					parent, atom = parent_atom
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
						# Display the specific atom from SetArg or
						msg.append("%s required by %s" % (atom, parent))
					msg.append("(and %d more)\n" % omitted_parents)
					msg.append(" (no parents)\n")

			# Attach a wrapped, indented explanation when one can be
			# generated for this conflict.
			explanation = self._slot_conflict_explanation(slot_nodes)
				msg.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					msg.append(2*indent + line + "\n")

		sys.stderr.write("".join(msg))

		# Suppress the generic advice when every conflict was explained,
		# or when the user asked for quiet output.
		explanations_for_all = explanations == len(self._dynamic_config._slot_collision_info)
		if explanations_for_all or "--quiet" in self._frozen_config.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
		if not self._dynamic_config._allow_backtracking and \
			(backtrack_opt is None or \
			(backtrack_opt > 0 and backtrack_opt < 30)):
			msg.append(" You may want to try a larger value of the ")
			msg.append("--backtrack option, such as --backtrack=30, ")
			msg.append("in order to see if that will solve this conflict ")
			msg.append("automatically.")

		# Flow the advice text through a 72-column writer for display.
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
			f.add_flowing_data(x)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
			f.add_flowing_data(x)
	def _slot_conflict_explanation(self, slot_nodes):
		# NOTE(review): the opening docstring delimiter is missing from
		# this excerpt; the text below is the method's docstring body.
		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		installed package with incorrect USE happened to get pulled
		into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		USE so it got pulled into the graph, and a new instance also got
		pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		and multiple package instances got pulled into the same slot to
		satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.

		all_conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms

		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(node)
			# Normally, there are always parent atoms. If there are
			# none then something unexpected is happening and there's
			# currently no suggestion for this case.
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
				# Suggestions are currently only implemented for cases
				# in which all conflict atoms have USE deps.
			if matched_node is not None:
				# If conflict atoms match multiple nodes
				# then there's no suggestion.
			matched_atoms = conflict_atoms
			if unmatched_node is not None:
				# Neither node is matched by conflict atoms, and
				# there is no suggestion for this case.
			unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.

		if unmatched_node.installed and not matched_node.installed and \
			unmatched_node.cpv == matched_node.cpv:
			# If the conflicting packages are the same version then
			# --newuse should be all that's needed. If they are different
			# versions then there's some other problem.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			# Case 2: build an explanation listing every USE dep that
			# must be satisfied, joined with commas and a final "and".
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			for atom in atoms[1:-1]:
				explanation += ", '%s'" % (atom,)
			explanation += " and '%s'" % (atoms[-1],)
	def _process_slot_conflicts(self):
		# NOTE(review): the docstring delimiters are missing from this
		# excerpt; the text below is the method's docstring body.
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		for (slot_atom, root), slot_nodes \
			in self._dynamic_config._slot_collision_info.items():

			# Union of the parent atoms of every package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if parent_atoms is None:
				self._dynamic_config._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					# Skip atoms already attributed to this package.
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						# An atom that matches only some packages in the
						# slot is a conflict atom.
						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		# NOTE(review): the "return flags" / "return None" lines of this
		# method are missing from this excerpt.
		if "--newuse" in self._frozen_config.myopts or \
			"--binpkg-respect-use" in self._frozen_config.myopts:
			# Flags added to or removed from IUSE, minus forced flags.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			# Plus flags whose enabled/disabled state changed.
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
			# --reinstall=changed-use: only state changes count, not
			# IUSE additions/removals.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		"""Drain the dependency stacks, adding packages and their deps to
		the graph until both stacks are empty."""
		dep_stack = self._dynamic_config._dep_stack
		dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
		while dep_stack or dep_disjunctive_stack:
			self._spinner_update()
			# Ordinary deps are processed before disjunctive (||) deps.
			dep = dep_stack.pop()
			if isinstance(dep, Package):
				# Package nodes expand into their own dependencies.
				if not self._add_pkg_deps(dep,
					allow_unsatisfied=allow_unsatisfied):
			if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
			if dep_disjunctive_stack:
				if not self._pop_disjunction(allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""Resolve a single Dependency: select a package for its atom and
		add it to the graph, or record blockers / unsatisfied deps.

		Returns a false value to signal failure to the caller
		(see _create_graph)."""
		debug = "--debug" in self._frozen_config.myopts
		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
		nodeps = "--nodeps" in self._frozen_config.myopts
		empty = "empty" in self._dynamic_config.myparams
		deep = self._dynamic_config.myparams.get("deep", 0)
		# Recurse into this dep's own deps when rebuilding everything
		# ("empty"), when depth is unlimited, or within the depth limit.
		recurse = empty or deep is True or dep.depth <= deep
		if not buildpkgonly and \
			dep.parent not in self._dynamic_config._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._dynamic_config._blocker_parents.add(blocker, dep.parent)

		if dep.child is None:
			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
				onlydeps=dep.onlydeps)
			# The caller has selected a specific package
			# via self._minimize_packages().
			existing_node = self._dynamic_config._slot_pkg_map[
				dep.root].get(dep_pkg.slot_atom)
			if existing_node is not dep_pkg:

			if dep.priority.optional:
				# This could be an unecessary build-time dep
				# pulled in by --with-bdeps=y.
			if allow_unsatisfied:
				self._dynamic_config._unsatisfied_deps.append(dep)
			self._dynamic_config._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))

			# The parent node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking:
				if dep.parent in self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
						"!!! backtracking loop detected: %s %s\n" % \
						self._dynamic_config._runtime_pkg_mask[
							dep.parent]), noiselevel=-1)
					# Do not backtrack if only USE have to be changed in
					# order to satisfy the dependency.
					dep_pkg, existing_node = \
						self._select_package(dep.root, dep.atom.without_use,
							onlydeps=dep.onlydeps)
					# Mask the parent and request a restart so a different
					# choice can be made on the next backtracking pass.
					self._dynamic_config._runtime_pkg_mask.setdefault(
						dep.parent, {})["missing dependency"] = \
						set([(dep.parent, dep.root, dep.atom)])
					self._dynamic_config._need_restart = True
					if "--debug" in self._frozen_config.myopts:
						msg.append("backtracking due to unsatisfied dep:")
						msg.append("    parent: %s" % dep.parent)
						msg.append("  priority: %s" % dep.priority)
						msg.append("      root: %s" % dep.root)
						msg.append("      atom: %s" % dep.atom)
						writemsg_level("".join("%s\n" % l for l in msg),
							noiselevel=-1, level=logging.DEBUG)

		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or recurse):
			if dep.root == self._frozen_config.target_root:
				# NOTE(review): a "try:" line is missing from this
				# excerpt; the except clauses below pair with it.
				myarg = next(self._iter_atoms_for_pkg(dep_pkg))
			except StopIteration:
			except portage.exception.InvalidDependString:
				if not dep_pkg.installed:
					# This shouldn't happen since the package
					# should have been masked.
			self._dynamic_config._ignored_deps.append(dep)

		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		"""Add a Package node (selected for the given Dependency) to the
		digraph, handling slot conflicts, argument atoms and virtuals.

		NOTE(review): this method continues beyond the end of this excerpt.
		"""
		myparent = dep.parent
		priority = dep.priority
		# Fall back to a default priority when the dep carries none.
		priority = DepPriority()
		# NOTE(review): docstring delimiters are missing from this excerpt;
		# the text below is part of the original method's docstring.
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
			Think --onlydeps, we need to ignore packages in that case.

		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge

		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self._dynamic_config.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]

		# Argument atoms (from the command line / sets) that match pkg.
		arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString as e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.installed and \
			"empty" not in self._dynamic_config.myparams and \
			vardbapi.match(pkg.slot_atom):
			# Increase the priority of dependencies on packages that
			# are being rebuilt. This optimizes merge order so that
			# dependencies are rebuilt/updated as soon as possible,
			# which is needed especially when emerge is called by
			# revdep-rebuild since dependencies may be affected by ABI
			# breakage that has rendered them useless. Don't adjust
			# priority here when in "empty" mode since all packages
			# are being merged in that case.
			priority.rebuild = True

		existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
		slot_collision = False
		existing_node_matches = pkg.cpv == existing_node.cpv
		if existing_node_matches and \
			pkg != existing_node and \
			dep.atom is not None:
			# Use package set for matching since it will match via
			# PROVIDE when necessary, while match_from_list does not.
			atom_set = InternalPackageSet(initial_atoms=[dep.atom])
			if not atom_set.findAtomForPackage(existing_node):
				existing_node_matches = False
		if existing_node_matches:
			# The existing node can be reused.
			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self._dynamic_config.digraph.add(existing_node, parent,
				self._add_parent_atom(existing_node, parent_atom)
			# If a direct circular dependency is not an unsatisfied
			# buildtime dependency then drop it here since otherwise
			# it can skew the merge order calculation in an unwanted
			if existing_node != myparent or \
				(priority.buildtime and not priority.satisfied):
				self._dynamic_config.digraph.addnode(existing_node, myparent,
				if dep.atom is not None and dep.parent is not None:
					self._add_parent_atom(existing_node,
						(dep.parent, dep.atom))

			# A slot conflict has occurred.
			# The existing node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking and \
				self._dynamic_config._runtime_pkg_mask:
				if "--debug" in self._frozen_config.myopts:
					"!!! backtracking loop detected: %s %s\n" % \
					self._dynamic_config._runtime_pkg_mask[
						existing_node]), noiselevel=-1)
			elif self._dynamic_config._allow_backtracking and \
				not self._accept_blocker_conflicts():
				self._add_slot_conflict(pkg)
				if dep.atom is not None and dep.parent is not None:
					self._add_parent_atom(pkg, (dep.parent, dep.atom))
				for parent_atom in arg_atoms:
					parent, atom = parent_atom
					self._add_parent_atom(pkg, parent_atom)
				self._process_slot_conflicts()

				self._dynamic_config._parent_atoms.get(pkg, set())
				conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
				parent_atoms = conflict_atoms
				if pkg >= existing_node:
					# We only care about the parent atoms
					# when they trigger a downgrade.

				# Mask the existing node and restart so backtracking can
				# try a different slot choice.
				self._dynamic_config._runtime_pkg_mask.setdefault(
					existing_node, {})["slot conflict"] = parent_atoms
				self._dynamic_config._need_restart = True
				if "--debug" in self._frozen_config.myopts:
					msg.append("backtracking due to slot conflict:")
					msg.append("   package: %s" % existing_node)
					msg.append("      slot: %s" % existing_node.slot_atom)
					msg.append("   parents: %s" % \
						[(str(parent), atom) \
						for parent, atom in parent_atoms])
					writemsg_level("".join("%s\n" % l for l in msg),
						noiselevel=-1, level=logging.DEBUG)

			# A slot collision has occurred.  Sometimes this coincides
			# with unresolvable blockers, so the slot collision will be
			# shown later if there are no unresolvable blockers.
			self._add_slot_conflict(pkg)
			slot_collision = True

			# Now add this node to the graph so that self.display()
			# can show use flags and --tree portage.output.  This node is
			# only being partially added to the graph.  It must not be
			# allowed to interfere with the other nodes that have been
			# added.  Do not overwrite data for existing nodes in
			# self._dynamic_config.mydbapi since that data will be used for blocker
			# Even though the graph is now invalid, continue to process
			# dependencies so that things like --fetchonly can still
			# function despite collisions.
		elif not previously_added:
			self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
			self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
			self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
			self._check_masks(pkg)

		if not pkg.installed:
			# Allow this package to satisfy old-style virtuals in case it
			# doesn't already. Any pre-existing providers will be preferred
			pkgsettings.setinst(pkg.cpv, pkg.metadata)
			# For consistency, also update the global virtuals.
			settings = self._frozen_config.roots[pkg.root].settings
			settings.setinst(pkg.cpv, pkg.metadata)
			except portage.exception.InvalidDependString as e:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		# Track nodes pulled in by set/argument atoms.
		self._dynamic_config._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		for parent_atom in arg_atoms:
			parent, atom = parent_atom
			self._dynamic_config.digraph.add(pkg, parent, priority=priority)
			self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
1036 We want to go deeper on a few occasions:
1037 Installing package A, we need to make sure package A's deps are met.
1038 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1039 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1044 deep = self._dynamic_config.myparams.get("deep", 0)
1045 empty = "empty" in self._dynamic_config.myparams
1046 recurse = empty or deep is True or depth + 1 <= deep
1047 dep_stack = self._dynamic_config._dep_stack
1048 if "recurse" not in self._dynamic_config.myparams:
1050 elif pkg.installed and not recurse:
1051 dep_stack = self._dynamic_config._ignored_deps
1053 self._spinner_update()
1055 if not previously_added:
1056 dep_stack.append(pkg)
1059 def _check_masks(self, pkg):
1061 slot_key = (pkg.root, pkg.slot_atom)
1063 # Check for upgrades in the same slot that are
1064 # masked due to a LICENSE change in a newer
1065 # version that is not masked for any other reason.
1066 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1067 if other_pkg is not None and pkg < other_pkg:
1068 self._dynamic_config._masked_license_updates.add(other_pkg)
1070 def _add_parent_atom(self, pkg, parent_atom):
1071 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1072 if parent_atoms is None:
1073 parent_atoms = set()
1074 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1075 parent_atoms.add(parent_atom)
1077 def _add_slot_conflict(self, pkg):
1078 self._dynamic_config._slot_collision_nodes.add(pkg)
1079 slot_key = (pkg.slot_atom, pkg.root)
1080 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1081 if slot_nodes is None:
1083 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1084 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
# Collect pkg's DEPEND/RDEPEND/PDEPEND strings, adjust them for the current
# mode (--buildpkgonly, --with-bdeps, removal actions, --root-deps), then
# parse each string and feed the resulting atoms into the dependency graph
# via _queue_disjunctive_deps() / _add_pkg_dep_string().
# NOTE(review): the embedded original line numbers below are non-contiguous,
# so intermediate statements are elided in this view; do not treat this span
# as complete, runnable code.
1087 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1089 mytype = pkg.type_name
1092 metadata = pkg.metadata
1093 myuse = pkg.use.enabled
1095 depth = pkg.depth + 1
1096 removal_action = "remove" in self._dynamic_config.myparams
1099 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1101 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant since
# nothing is merged to the live filesystem.
1103 if not pkg.built and \
1104 "--buildpkgonly" in self._frozen_config.myopts and \
1105 "deep" not in self._dynamic_config.myparams and \
1106 "empty" not in self._dynamic_config.myparams:
1107 edepend["RDEPEND"] = ""
1108 edepend["PDEPEND"] = ""
1109 bdeps_optional = False
1111 if pkg.built and not removal_action:
1112 if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
1113 # Pull in build time deps as requested, but mark them as
1114 # "optional" since they are not strictly required. This allows
1115 # more freedom in the merge order calculation for solving
1116 # circular dependencies. Don't convert to PDEPEND since that
1117 # could make --with-bdeps=y less effective if it is used to
1118 # adjust merge order to prevent built_with_use() calls from
1120 bdeps_optional = True
1122 # built packages do not have build time dependencies.
1123 edepend["DEPEND"] = ""
1125 if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
1126 edepend["DEPEND"] = ""
1132 root_deps = self._frozen_config.myopts.get("--root-deps")
1133 if root_deps is not None:
1134 if root_deps is True:
1136 elif root_deps == "rdeps":
1137 edepend["DEPEND"] = ""
# Build-time deps resolve against bdeps_root; runtime deps against myroot.
1140 (bdeps_root, edepend["DEPEND"],
1141 self._priority(buildtime=(not bdeps_optional),
1142 optional=bdeps_optional)),
1143 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
1144 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
1147 debug = "--debug" in self._frozen_config.myopts
1148 strict = mytype != "installed"
# Relaxed dep-string checking for installed packages (their metadata may
# predate current syntax rules); restored to strict in the finally path.
1151 portage.dep._dep_check_strict = False
1153 for dep_root, dep_string, dep_priority in deps:
1157 writemsg_level("\nParent: %s\n" % (pkg,),
1158 noiselevel=-1, level=logging.DEBUG)
1159 writemsg_level("Depstring: %s\n" % (dep_string,),
1160 noiselevel=-1, level=logging.DEBUG)
1161 writemsg_level("Priority: %s\n" % (dep_priority,),
1162 noiselevel=-1, level=logging.DEBUG)
1166 dep_string = portage.dep.paren_normalize(
1167 portage.dep.use_reduce(
1168 portage.dep.paren_reduce(dep_string),
1169 uselist=pkg.use.enabled))
1171 dep_string = list(self._queue_disjunctive_deps(
1172 pkg, dep_root, dep_priority, dep_string))
1174 except portage.exception.InvalidDependString as e:
1178 show_invalid_depstring_notice(pkg, dep_string, str(e))
1184 dep_string = portage.dep.paren_enclose(dep_string)
1186 if not self._add_pkg_dep_string(
1187 pkg, dep_root, dep_priority, dep_string,
1191 except portage.exception.AmbiguousPackageName as e:
1193 portage.writemsg("\n\n!!! An atom in the dependencies " + \
1194 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1196 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
1197 portage.writemsg("\n", noiselevel=-1)
1198 if mytype == "binary":
1200 "!!! This binary package cannot be installed: '%s'\n" % \
1201 mykey, noiselevel=-1)
1202 elif mytype == "ebuild":
1203 portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
1204 myebuild, mylocation = portdb.findname2(mykey)
1205 portage.writemsg("!!! This ebuild cannot be installed: " + \
1206 "'%s'\n" % myebuild, noiselevel=-1)
1207 portage.writemsg("!!! Please notify the package maintainer " + \
1208 "that atoms must be fully-qualified.\n", noiselevel=-1)
1211 portage.dep._dep_check_strict = True
# Parse one dependency string of pkg via _select_atoms(), then add a
# Dependency edge for each selected (atom, child) pair. Atoms chosen
# through an indirect (new-style) virtual are attached to the virtual
# package itself so the graph reflects real parent/child relationships.
# NOTE(review): embedded line numbers are non-contiguous — statements are
# elided in this view (e.g. the return paths); not complete, runnable code.
1214 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1216 depth = pkg.depth + 1
1217 debug = "--debug" in self._frozen_config.myopts
1218 strict = pkg.type_name != "installed"
1221 writemsg_level("\nParent: %s\n" % (pkg,),
1222 noiselevel=-1, level=logging.DEBUG)
1223 writemsg_level("Depstring: %s\n" % (dep_string,),
1224 noiselevel=-1, level=logging.DEBUG)
1225 writemsg_level("Priority: %s\n" % (dep_priority,),
1226 noiselevel=-1, level=logging.DEBUG)
1229 selected_atoms = self._select_atoms(dep_root,
1230 dep_string, myuse=pkg.use.enabled, parent=pkg,
1231 strict=strict, priority=dep_priority)
1232 except portage.exception.InvalidDependString as e:
1233 show_invalid_depstring_notice(pkg, dep_string, str(e))
1240 writemsg_level("Candidates: %s\n" % \
1241 ([str(x) for x in selected_atoms[pkg]],),
1242 noiselevel=-1, level=logging.DEBUG)
1244 root_config = self._frozen_config.roots[dep_root]
1245 vardb = root_config.trees["vartree"].dbapi
# Direct dependencies of pkg itself.
1247 for atom, child in self._minimize_children(
1248 pkg, dep_priority, root_config, selected_atoms[pkg]):
1250 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package gets a
# "satisfied" priority, giving the resolver ordering flexibility.
1251 if not atom.blocker and vardb.match(atom):
1252 mypriority.satisfied = True
1254 if not self._add_dep(Dependency(atom=atom,
1255 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1256 priority=mypriority, root=dep_root),
1257 allow_unsatisfied=allow_unsatisfied):
1260 selected_atoms.pop(pkg)
1262 # Add selected indirect virtual deps to the graph. This
1263 # takes advantage of circular dependency avoidance that's done
1264 # by dep_zapdeps. We preserve actual parent/child relationships
1265 # here in order to avoid distorting the dependency graph like
1266 # <=portage-2.1.6.x did.
1267 for virt_pkg, atoms in selected_atoms.items():
1270 writemsg_level("Candidates: %s: %s\n" % \
1271 (virt_pkg.cpv, [str(x) for x in atoms]),
1272 noiselevel=-1, level=logging.DEBUG)
1274 # Just assume depth + 1 here for now, though it's not entirely
1275 # accurate since multiple levels of indirect virtual deps may
1276 # have been traversed. The _add_pkg call will reset the depth to
1277 # 0 if this package happens to match an argument.
1278 if not self._add_pkg(virt_pkg,
1279 Dependency(atom=Atom('=' + virt_pkg.cpv),
1280 depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
1284 for atom, child in self._minimize_children(
1285 pkg, self._priority(runtime=True), root_config, atoms):
1286 # This is a GLEP 37 virtual, so its deps are all runtime.
1287 mypriority = self._priority(runtime=True)
1288 if not atom.blocker and vardb.match(atom):
1289 mypriority.satisfied = True
1291 if not self._add_dep(Dependency(atom=atom,
1292 blocker=atom.blocker, child=child, depth=virt_pkg.depth,
1293 parent=virt_pkg, priority=mypriority, root=dep_root),
1294 allow_unsatisfied=allow_unsatisfied):
1298 writemsg_level("Exiting... %s\n" % (pkg,),
1299 noiselevel=-1, level=logging.DEBUG)
# Generator: map each atom to its selected package, then prune packages
# that every one of their atoms could also be satisfied by some other
# selected package — so redundant selections are dropped. Yields
# (atom, package) pairs, with </<= atoms first (bug #291142).
# NOTE(review): embedded line numbers skip — intermediate statements are
# elided in this view; not complete, runnable code.
1303 def _minimize_children(self, parent, priority, root_config, atoms):
1305 Selects packages to satisfy the given atoms, and minimizes the
1306 number of selected packages. This serves to identify and eliminate
1307 redundant package selections when multiple atoms happen to specify
1317 dep_pkg, existing_node = self._select_package(
1318 root_config.root, atom)
1322 atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
1324 if len(atom_pkg_map) < 2:
1325 for item in atom_pkg_map.items():
1331 for atom, pkg in atom_pkg_map.items():
1332 pkg_atom_map.setdefault(pkg, set()).add(atom)
1333 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
1335 for cp, pkgs in cp_pkg_map.items():
1338 for atom in pkg_atom_map[pkg]:
1342 # Use a digraph to identify and eliminate any
1343 # redundant package selections.
1344 atom_pkg_graph = digraph()
1347 for atom in pkg_atom_map[pkg1]:
1349 atom_pkg_graph.add(pkg1, atom)
1350 atom_set = InternalPackageSet(initial_atoms=(atom,))
1354 if atom_set.findAtomForPackage(pkg2):
1355 atom_pkg_graph.add(pkg2, atom)
# A package is removable only if every atom pointing at it has at least
# one alternative child left in the graph.
1358 eliminate_pkg = True
1359 for atom in atom_pkg_graph.parent_nodes(pkg):
1360 if len(atom_pkg_graph.child_nodes(atom)) < 2:
1361 eliminate_pkg = False
1364 atom_pkg_graph.remove(pkg)
1366 # Yield < and <= atoms first, since those are more likely to
1367 # cause slot conflicts, and we want those atoms to be displayed
1368 # in the resulting slot conflict message (see bug #291142).
1371 for atom in cp_atoms:
1372 if atom.operator in ('<', '<='):
1373 less_than.append(atom)
1375 not_less_than.append(atom)
1377 for atom in chain(less_than, not_less_than):
1378 child_pkgs = atom_pkg_graph.child_nodes(atom)
1379 yield (atom, child_pkgs[0])
# Generator over a parsed dependency structure: push || groups and
# virtual/* atoms onto the disjunctive stack for deferred resolution,
# and yield everything else unchanged.
# NOTE(review): embedded line numbers skip — some statements (e.g. the
# yields and index increments) are elided in this view.
1381 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
1383 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
1384 Yields non-disjunctive deps. Raises InvalidDependString when
1388 while i < len(dep_struct):
1390 if isinstance(x, list):
1391 for y in self._queue_disjunctive_deps(
1392 pkg, dep_root, dep_priority, x):
# A "||" token is queued together with its following choice-list.
1395 self._queue_disjunction(pkg, dep_root, dep_priority,
1396 [ x, dep_struct[ i + 1 ] ] )
1400 x = portage.dep.Atom(x)
1401 except portage.exception.InvalidAtom:
# Invalid atoms are tolerated for installed packages (stale metadata);
# for everything else they are a hard error.
1402 if not pkg.installed:
1403 raise portage.exception.InvalidDependString(
1404 "invalid atom: '%s'" % x)
1406 # Note: Eventually this will check for PROPERTIES=virtual
1407 # or whatever other metadata gets implemented for this
1409 if x.cp.startswith('virtual/'):
1410 self._queue_disjunction( pkg, dep_root,
1411 dep_priority, [ str(x) ] )
1416 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1417 self._dynamic_config._dep_disjunctive_stack.append(
1418 (pkg, dep_root, dep_priority, dep_struct))
# Pop one deferred disjunctive dep and resolve it through
# _add_pkg_dep_string().
# NOTE(review): embedded line numbers skip — the docstring delimiters and
# the return statements after the `if` are elided in this view.
1420 def _pop_disjunction(self, allow_unsatisfied):
1422 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
1423 populate self._dynamic_config._dep_stack.
1425 pkg, dep_root, dep_priority, dep_struct = \
1426 self._dynamic_config._dep_disjunctive_stack.pop()
# Re-serialize the structure into a dependency string for reprocessing.
1427 dep_string = portage.dep.paren_enclose(dep_struct)
1428 if not self._add_pkg_dep_string(
1429 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
    """Construct a dependency priority for the current operation mode.

    Uses UnmergeDepPriority while a "remove" operation is in progress
    and DepPriority otherwise, forwarding all keyword arguments to the
    chosen constructor.

    NOTE(review): the text as received lacked the `else:` branch, which
    would have made DepPriority unconditionally override the removal
    case; the branch is restored here.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
# Expand a category-less atom into all category-qualified atoms that have
# matches in any configured package database.
# NOTE(review): embedded line numbers skip — the category-accumulation and
# final return are elided in this view.
1440 def _dep_expand(self, root_config, atom_without_category):
1442 @param root_config: a root config instance
1443 @type root_config: RootConfig
1444 @param atom_without_category: an atom without a category component
1445 @type atom_without_category: String
1447 @returns: a list of atoms containing categories (possibly empty)
# Temporarily insert the placeholder category "null" so the atom can be
# parsed and its package-name part isolated.
1449 null_cp = portage.dep_getkey(insert_category_into_atom(
1450 atom_without_category, "null"))
1451 cat, atom_pn = portage.catsplit(null_cp)
1453 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1455 for db, pkg_type, built, installed, db_keys in dbs:
1456 for cat in db.categories:
1457 if db.cp_list("%s/%s" % (cat, atom_pn)):
1461 for cat in categories:
1462 deps.append(Atom(insert_category_into_atom(
1463 atom_without_category, cat)))
# Predicate: does any configured package database contain a package for
# atom_cp (used to detect new-style virtual providers)?
# NOTE(review): embedded line numbers skip — the return statements are
# elided in this view.
1466 def _have_new_virt(self, root, atom_cp):
1468 for db, pkg_type, built, installed, db_keys in \
1469 self._dynamic_config._filtered_trees[root]["dbs"]:
1470 if db.cp_list(atom_cp):
# Generator over the argument atoms that pkg satisfies, skipping atoms
# that are better served by a new-style virtual or by a visible package
# in a higher slot.
# NOTE(review): embedded line numbers skip — several statements (e.g. the
# yields and loop-control) are elided in this view.
1475 def _iter_atoms_for_pkg(self, pkg):
1476 # TODO: add multiple $ROOT support
1477 if pkg.root != self._frozen_config.target_root:
1479 atom_arg_map = self._dynamic_config._atom_arg_map
1480 root_config = self._frozen_config.roots[pkg.root]
1481 for atom in self._dynamic_config._set_atoms.iterAtomsForPackage(pkg):
# An old-style virtual atom is skipped when a new-style virtual package
# exists for the same cp.
1482 if atom.cp != pkg.cp and \
1483 self._have_new_virt(pkg.root, atom.cp):
1486 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
1487 visible_pkgs.reverse() # descending order
1489 for visible_pkg in visible_pkgs:
1490 if visible_pkg.cp != atom.cp:
1492 if pkg >= visible_pkg:
1493 # This is descending order, and we're not
1494 # interested in any versions <= pkg given.
1496 if pkg.slot_atom != visible_pkg.slot_atom:
1497 higher_slot = visible_pkg
1499 if higher_slot is not None:
1501 for arg in atom_arg_map[(atom, pkg.root)]:
1502 if isinstance(arg, PackageArg) and \
# Entry point for turning emerge command-line arguments (.tbz2 files,
# .ebuild files, absolute paths, set names, and package atoms) into
# DependencyArg objects in self._dynamic_config._initial_arg_list, then
# delegating to self._resolve().
# NOTE(review): the embedded original line numbers below are non-contiguous
# — the argument-classification loop and several branches are elided in
# this view; do not treat this span as complete, runnable code.
1507 def select_files(self, myfiles):
1508 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1509 self._dynamic_config._initial_arg_list and call self._resolve to create the
1510 appropriate depgraph and return a favorite list."""
1512 debug = "--debug" in self._frozen_config.myopts
1513 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1514 sets = root_config.sets
1515 getSetAtoms = root_config.setconfig.getSetAtoms
1517 myroot = self._frozen_config.target_root
1518 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1519 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1520 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1521 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1522 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1523 pkgsettings = self._frozen_config.pkgsettings[myroot]
1525 onlydeps = "--onlydeps" in self._frozen_config.myopts
# --- Branch: argument is a binary package file (.tbz2). ---
1528 ext = os.path.splitext(x)[1]
1530 if not os.path.exists(x):
1532 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1533 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1534 elif os.path.exists(
1535 os.path.join(pkgsettings["PKGDIR"], x)):
1536 x = os.path.join(pkgsettings["PKGDIR"], x)
1538 print("\n\n!!! Binary package '"+str(x)+"' does not exist.")
1539 print("!!! Please ensure the tbz2 exists as specified.\n")
1540 return 0, myfavorites
1541 mytbz2=portage.xpak.tbz2(x)
1542 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1543 if os.path.realpath(x) != \
1544 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1545 print(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n"))
1546 return 0, myfavorites
1548 pkg = self._pkg(mykey, "binary", root_config,
1550 args.append(PackageArg(arg=x, package=pkg,
1551 root_config=root_config))
# --- Branch: argument is a raw .ebuild file path. ---
1552 elif ext==".ebuild":
1553 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1554 pkgdir = os.path.dirname(ebuild_path)
1555 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1556 cp = pkgdir[len(tree_root)+1:]
1557 e = portage.exception.PackageNotFound(
1558 ("%s is not in a valid portage tree " + \
1559 "hierarchy or does not exist") % x)
1560 if not portage.isvalidatom(cp):
1562 cat = portage.catsplit(cp)[0]
1563 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1564 if not portage.isvalidatom("="+mykey):
1566 ebuild_path = portdb.findname(mykey)
1568 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1569 cp, os.path.basename(ebuild_path)):
1570 print(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n"))
1571 return 0, myfavorites
1572 if mykey not in portdb.xmatch(
1573 "match-visible", portage.dep_getkey(mykey)):
1574 print(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use"))
1575 print(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man"))
1576 print(colorize("BAD", "*** page for details."))
1577 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1580 raise portage.exception.PackageNotFound(
1581 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1582 pkg = self._pkg(mykey, "ebuild", root_config,
1584 args.append(PackageArg(arg=x, package=pkg,
1585 root_config=root_config))
# --- Branch: argument is an absolute filesystem path; resolve to the
# owning package later via a single batched iter_owners() call. ---
1586 elif x.startswith(os.path.sep):
1587 if not x.startswith(myroot):
1588 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1589 " $ROOT.\n") % x, noiselevel=-1)
1591 # Queue these up since it's most efficient to handle
1592 # multiple files in a single iter_owners() call.
1593 lookup_owners.append(x)
# --- Branch: argument is a set name ("system", "world", or @set). ---
1595 if x in ("system", "world"):
1597 if x.startswith(SETPREFIX):
1598 s = x[len(SETPREFIX):]
1600 raise portage.exception.PackageSetNotFound(s)
1601 if s in self._dynamic_config._sets:
1603 # Recursively expand sets so that containment tests in
1604 # self._get_parent_sets() properly match atoms in nested
1605 # sets (like if world contains system).
1606 expanded_set = InternalPackageSet(
1607 initial_atoms=getSetAtoms(s))
1608 self._dynamic_config._sets[s] = expanded_set
1609 args.append(SetArg(arg=x, set=expanded_set,
1610 root_config=root_config))
# --- Branch: argument is a plain package atom. ---
1612 if not is_valid_package_atom(x):
1613 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1615 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1616 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1618 # Don't expand categories or old-style virtuals here unless
1619 # necessary. Expansion of old-style virtuals here causes at
1620 # least the following problems:
1621 # 1) It's more difficult to determine which set(s) an atom
1622 # came from, if any.
1623 # 2) It takes away freedom from the resolver to choose other
1624 # possible expansions when necessary.
1626 args.append(AtomArg(arg=x, atom=Atom(x),
1627 root_config=root_config))
# Category-less atom: expand to candidate categories and disambiguate,
# preferring installed and non-virtual matches.
1629 expanded_atoms = self._dep_expand(root_config, x)
1630 installed_cp_set = set()
1631 for atom in expanded_atoms:
1632 if vardb.cp_list(atom.cp):
1633 installed_cp_set.add(atom.cp)
1635 if len(installed_cp_set) > 1:
1636 non_virtual_cps = set()
1637 for atom_cp in installed_cp_set:
1638 if not atom_cp.startswith("virtual/"):
1639 non_virtual_cps.add(atom_cp)
1640 if len(non_virtual_cps) == 1:
1641 installed_cp_set = non_virtual_cps
1643 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
1644 installed_cp = next(iter(installed_cp_set))
1645 expanded_atoms = [atom for atom in expanded_atoms \
1646 if atom.cp == installed_cp]
1648 # If a non-virtual package and one or more virtual packages
1649 # are in expanded_atoms, use the non-virtual package.
1650 if len(expanded_atoms) > 1:
1651 number_of_virtuals = 0
1652 for expanded_atom in expanded_atoms:
1653 if expanded_atom.cp.startswith("virtual/"):
1654 number_of_virtuals += 1
1656 candidate = expanded_atom
1657 if len(expanded_atoms) - number_of_virtuals == 1:
1658 expanded_atoms = [ candidate ]
1660 if len(expanded_atoms) > 1:
1663 ambiguous_package_name(x, expanded_atoms, root_config,
1664 self._frozen_config.spinner, self._frozen_config.myopts)
1665 return False, myfavorites
1667 atom = expanded_atoms[0]
1669 null_atom = Atom(insert_category_into_atom(x, "null"))
1670 cat, atom_pn = portage.catsplit(null_atom.cp)
1671 virts_p = root_config.settings.get_virts_p().get(atom_pn)
1673 # Allow the depgraph to choose which virtual.
1674 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
1678 args.append(AtomArg(arg=x, atom=atom,
1679 root_config=root_config))
# Resolve all queued filesystem paths to their owning installed packages.
1683 search_for_multiple = False
1684 if len(lookup_owners) > 1:
1685 search_for_multiple = True
1687 for x in lookup_owners:
1688 if not search_for_multiple and os.path.isdir(x):
1689 search_for_multiple = True
1690 relative_paths.append(x[len(myroot)-1:])
1693 for pkg, relative_path in \
1694 real_vardb._owners.iter_owners(relative_paths):
1695 owners.add(pkg.mycpv)
1696 if not search_for_multiple:
1700 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1701 "by any package.\n") % lookup_owners[0], noiselevel=-1)
1705 slot = vardb.aux_get(cpv, ["SLOT"])[0]
1707 # portage now masks packages with missing slot, but it's
1708 # possible that one was installed by an older version
1709 atom = Atom(portage.cpv_getkey(cpv))
1711 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
1712 args.append(AtomArg(arg=atom, atom=atom,
1713 root_config=root_config))
# --update: iterate greedy-slot atom generation until consistent with
# blockers between selected packages.
1715 if "--update" in self._frozen_config.myopts:
1716 # In some cases, the greedy slots behavior can pull in a slot that
1717 # the user would want to uninstall due to it being blocked by a
1718 # newer version in a different slot. Therefore, it's necessary to
1719 # detect and discard any that should be uninstalled. Each time
1720 # that arguments are updated, package selections are repeated in
1721 # order to ensure consistency with the current arguments:
1723 # 1) Initialize args
1724 # 2) Select packages and generate initial greedy atoms
1725 # 3) Update args with greedy atoms
1726 # 4) Select packages and generate greedy atoms again, while
1727 # accounting for any blockers between selected packages
1728 # 5) Update args with revised greedy atoms
1730 self._set_args(args)
1733 greedy_args.append(arg)
1734 if not isinstance(arg, AtomArg):
1736 for atom in self._greedy_slots(arg.root_config, arg.atom):
1738 AtomArg(arg=arg.arg, atom=atom,
1739 root_config=arg.root_config))
1741 self._set_args(greedy_args)
1744 # Revise greedy atoms, accounting for any blockers
1745 # between selected packages.
1746 revised_greedy_args = []
1748 revised_greedy_args.append(arg)
1749 if not isinstance(arg, AtomArg):
1751 for atom in self._greedy_slots(arg.root_config, arg.atom,
1752 blocker_lookahead=True):
1753 revised_greedy_args.append(
1754 AtomArg(arg=arg.arg, atom=atom,
1755 root_config=arg.root_config))
1756 args = revised_greedy_args
1757 del revised_greedy_args
1759 self._set_args(args)
# Favorites (for the world file) collect atoms and set names, deduped.
1761 myfavorites = set(myfavorites)
1763 if isinstance(arg, (AtomArg, PackageArg)):
1764 myfavorites.add(arg.atom)
1765 elif isinstance(arg, SetArg):
1766 myfavorites.add(arg.arg)
1767 myfavorites = list(myfavorites)
1770 portage.writemsg("\n", noiselevel=-1)
1771 # Order needs to be preserved since a feature of --nodeps
1772 # is to allow the user to force a specific merge order.
1773 self._dynamic_config._initial_arg_list = args[:]
1775 return self._resolve(myfavorites)
# Pull the root nodes named by _initial_arg_list into the graph (handling
# package.provided entries, old-style virtual expansion, and per-arg error
# reporting), then process all their dependencies via _create_graph().
# Returns a (success, favorites) pair.
# NOTE(review): embedded line numbers skip — try/else scaffolding and some
# branches are elided in this view; not complete, runnable code.
1777 def _resolve(self, myfavorites):
1778 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1779 call self._creategraph to process theier deps and return
1781 debug = "--debug" in self._frozen_config.myopts
1782 onlydeps = "--onlydeps" in self._frozen_config.myopts
1783 myroot = self._frozen_config.target_root
1784 pkgsettings = self._frozen_config.pkgsettings[myroot]
1785 pprovideddict = pkgsettings.pprovideddict
1786 virtuals = pkgsettings.getvirtuals()
1787 for arg in self._dynamic_config._initial_arg_list:
1788 for atom in arg.set:
1789 self._spinner_update()
1790 dep = Dependency(atom=atom, onlydeps=onlydeps,
1791 root=myroot, parent=arg)
# package.provided satisfies the atom without graph work.
1793 pprovided = pprovideddict.get(atom.cp)
1794 if pprovided and portage.match_from_list(atom, pprovided):
1795 # A provided package has been specified on the command line.
1796 self._dynamic_config._pprovided_args.append((arg, atom))
1798 if isinstance(arg, PackageArg):
1799 if not self._add_pkg(arg.package, dep) or \
1800 not self._create_graph():
1801 if not self._dynamic_config._need_restart:
1802 sys.stderr.write(("\n\n!!! Problem " + \
1803 "resolving dependencies for %s\n") % \
1805 return 0, myfavorites
1808 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1809 (arg, atom), noiselevel=-1)
1810 pkg, existing_node = self._select_package(
1811 myroot, atom, onlydeps=onlydeps)
# No selectable package: try old-style virtual expansions against
# package.provided before giving up.
1813 pprovided_match = False
1814 for virt_choice in virtuals.get(atom.cp, []):
1815 expanded_atom = portage.dep.Atom(
1816 atom.replace(atom.cp,
1817 portage.dep_getkey(virt_choice), 1))
1818 pprovided = pprovideddict.get(expanded_atom.cp)
1820 portage.match_from_list(expanded_atom, pprovided):
1821 # A provided package has been
1822 # specified on the command line.
1823 self._dynamic_config._pprovided_args.append((arg, atom))
1824 pprovided_match = True
1829 if not (isinstance(arg, SetArg) and \
1830 arg.name in ("selected", "system", "world")):
1831 self._dynamic_config._unsatisfied_deps_for_display.append(
1832 ((myroot, atom), {}))
1833 return 0, myfavorites
1834 self._dynamic_config._missing_args.append((arg, atom))
1836 if atom.cp != pkg.cp:
1837 # For old-style virtuals, we need to repeat the
1838 # package.provided check against the selected package.
1839 expanded_atom = atom.replace(atom.cp, pkg.cp)
1840 pprovided = pprovideddict.get(pkg.cp)
1842 portage.match_from_list(expanded_atom, pprovided):
1843 # A provided package has been
1844 # specified on the command line.
1845 self._dynamic_config._pprovided_args.append((arg, atom))
1847 if pkg.installed and "selective" not in self._dynamic_config.myparams:
1848 self._dynamic_config._unsatisfied_deps_for_display.append(
1849 ((myroot, atom), {}))
1850 # Previous behavior was to bail out in this case, but
1851 # since the dep is satisfied by the installed package,
1852 # it's more friendly to continue building the graph
1853 # and just show a warning message. Therefore, only bail
1854 # out here if the atom is not from either the system or
1856 if not (isinstance(arg, SetArg) and \
1857 arg.name in ("selected", "system", "world")):
1858 return 0, myfavorites
1860 # Add the selected package to the graph as soon as possible
1861 # so that later dep_check() calls can use it as feedback
1862 # for making more consistent atom selections.
1863 if not self._add_pkg(pkg, dep):
1864 if self._dynamic_config._need_restart:
1866 elif isinstance(arg, SetArg):
1867 sys.stderr.write(("\n\n!!! Problem resolving " + \
1868 "dependencies for %s from %s\n") % \
1871 sys.stderr.write(("\n\n!!! Problem resolving " + \
1872 "dependencies for %s\n") % atom)
1873 return 0, myfavorites
1875 except portage.exception.MissingSignature as e:
1876 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1877 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1878 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1879 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1880 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1881 return 0, myfavorites
1882 except portage.exception.InvalidSignature as e:
1883 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1884 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1885 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1886 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1887 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1888 return 0, myfavorites
1889 except SystemExit as e:
1890 raise # Needed else can't exit
1891 except Exception as e:
1892 print("\n\n!!! Problem in '%s' dependencies." % atom, file=sys.stderr)
1893 print("!!!", str(e), getattr(e, "__module__", None), file=sys.stderr)
1896 # Now that the root packages have been added to the graph,
1897 # process the dependencies.
1898 if not self._create_graph():
1899 return 0, myfavorites
# --usepkgonly sanity pass: flag graph nodes that will merge from source
# (no binary available).
1902 if "--usepkgonly" in self._frozen_config.myopts:
1903 for xs in self._dynamic_config.digraph.all_nodes():
1904 if not isinstance(xs, Package):
1906 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1910 print("Missing binary for:",xs[2])
1914 except self._unknown_internal_error:
1915 return False, myfavorites
1917 # We're true here unless we are missing binaries.
1918 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom -> argument map from the
# given argument list, then invalidate the package-selection cache.
# NOTE(review): embedded line numbers skip — loop headers and some
# statements are elided in this view.
1920 def _set_args(self, args):
1922 Create the "args" package set from atoms and packages given as
1923 arguments. This method can be called multiple times if necessary.
1924 The package selection cache is automatically invalidated, since
1925 arguments influence package selections.
1927 args_set = self._dynamic_config._sets["args"]
1930 if not isinstance(arg, (AtomArg, PackageArg)):
1933 if atom in args_set:
# Flatten every registered set into the combined _set_atoms collection.
1937 self._dynamic_config._set_atoms.clear()
1938 self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
1939 atom_arg_map = self._dynamic_config._atom_arg_map
1940 atom_arg_map.clear()
1942 for atom in arg.set:
1943 atom_key = (atom, arg.root_config.root)
1944 refs = atom_arg_map.get(atom_key)
1947 atom_arg_map[atom_key] = refs
1951 # Invalidate the package selection cache, since
1952 # arguments influence package selections.
1953 self._dynamic_config._highest_pkg_cache.clear()
1954 for trees in self._dynamic_config._filtered_trees.values():
1955 trees["porttree"].dbapi._clear_cache()
# NOTE(review): elided listing — `try:`/`continue`/initializer lines
# (e.g. `slots = set()`, `greedy_pkgs = []`, `blockers = {}`) are missing
# between the embedded line numbers.
1957 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
1959 Return a list of slot atoms corresponding to installed slots that
1960 differ from the slot of the highest visible match. When
1961 blocker_lookahead is True, slot atoms that would trigger a blocker
1962 conflict are automatically discarded, potentially allowing automatic
1963 uninstallation of older slots when appropriate.
# Find the highest visible package matching the atom; nothing to do if
# there is no match.
1965 highest_pkg, in_graph = self._select_package(root_config.root, atom)
1966 if highest_pkg is None:
# Collect the SLOT values of installed packages with the same cp.
1968 vardb = root_config.trees["vartree"].dbapi
1970 for cpv in vardb.match(atom):
1971 # don't mix new virtuals with old virtuals
1972 if portage.cpv_getkey(cpv) == highest_pkg.cp:
1973 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
# The highest match's own slot is not a "greedy" candidate, so it is
# added then removed (presumably around an early-exit check — elided).
1975 slots.add(highest_pkg.metadata["SLOT"])
1979 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select the best package in that slot and keep
# it only if it is a lower version of the same cp.
1982 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1983 pkg, in_graph = self._select_package(root_config.root, slot_atom)
1984 if pkg is not None and \
1985 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
1986 greedy_pkgs.append(pkg)
1989 if not blocker_lookahead:
1990 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: gather blocker atoms from each candidate's *DEPEND
# so mutually-blocking slots can be filtered out below.
1993 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1994 for pkg in greedy_pkgs + [highest_pkg]:
1995 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1997 selected_atoms = self._select_atoms(
1998 pkg.root, dep_str, pkg.use.enabled,
1999 parent=pkg, strict=True)
# Packages with unparseable dependencies simply get no blockers entry
# and are filtered out later.
2000 except portage.exception.InvalidDependString:
2003 for atoms in selected_atoms.values():
2004 blocker_atoms.extend(x for x in atoms if x.blocker)
2005 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2007 if highest_pkg not in blockers:
2010 # filter packages with invalid deps
2011 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2013 # filter packages that conflict with highest_pkg
2014 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2015 (blockers[highest_pkg].findAtomForPackage(pkg) or \
2016 blockers[pkg].findAtomForPackage(highest_pkg))]
2021 # If two packages conflict, discard the lower version.
2022 discard_pkgs = set()
# Sort highest-first so pkg1 is always >= pkg2 in the pairwise scan.
2023 greedy_pkgs.sort(reverse=True)
2024 for i in range(len(greedy_pkgs) - 1):
2025 pkg1 = greedy_pkgs[i]
2026 if pkg1 in discard_pkgs:
2028 for j in range(i + 1, len(greedy_pkgs)):
2029 pkg2 = greedy_pkgs[j]
2030 if pkg2 in discard_pkgs:
2032 if blockers[pkg1].findAtomForPackage(pkg2) or \
2033 blockers[pkg2].findAtomForPackage(pkg1):
2035 discard_pkgs.add(pkg2)
2037 return [pkg.slot_atom for pkg in greedy_pkgs \
2038 if pkg not in discard_pkgs]
# Thin wrapper: forces atom selection to use the graph trees so that
# already-graphed / installed packages are preferred, then delegates to
# _select_atoms_highest_available. (Docstring delimiters elided in this
# listing.)
2040 def _select_atoms_from_graph(self, *pargs, **kwargs):
2042 Prefer atoms matching packages that have already been
2043 added to the graph or those that are installed and have
2044 not been scheduled for replacement.
2046 kwargs["trees"] = self._dynamic_config._graph_trees
2047 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): elided listing — the `try:`/`finally:` scaffolding around
# the dep_check call, the success test before the raise, and several
# `continue` lines are missing between the embedded line numbers.
2049 def _select_atoms_highest_available(self, root, depstring,
2050 myuse=None, parent=None, strict=True, trees=None, priority=None):
2051 """This will raise InvalidDependString if necessary. If trees is
2052 None then self._dynamic_config._filtered_trees is used."""
2053 pkgsettings = self._frozen_config.pkgsettings[root]
2055 trees = self._dynamic_config._filtered_trees
2056 atom_graph = digraph()
# Stash parent/priority/atom_graph into the trees dict so dep_check's
# callback machinery can see them; removed again afterwards.
2059 if parent is not None:
2060 trees[root]["parent"] = parent
2061 trees[root]["atom_graph"] = atom_graph
2062 if priority is not None:
2063 trees[root]["priority"] = priority
# Strict checking is disabled around the dep_check call and restored
# afterwards (the enclosing try/finally lines are elided here).
2065 portage.dep._dep_check_strict = False
2066 mycheck = portage.dep_check(depstring, None,
2067 pkgsettings, myuse=myuse,
2068 myroot=root, trees=trees)
2070 if parent is not None:
2071 trees[root].pop("parent")
2072 trees[root].pop("atom_graph")
2073 if priority is not None:
2074 trees[root].pop("priority")
2075 portage.dep._dep_check_strict = True
# On failure mycheck[1] carries the error message; on success it is the
# chosen atom list.
2077 raise portage.exception.InvalidDependString(mycheck[1])
2079 selected_atoms = mycheck[1]
# With a parent: walk the atom graph and group each chosen atom under
# the package node that satisfies it.
2081 chosen_atoms = frozenset(mycheck[1])
2082 selected_atoms = {parent : []}
2083 for node in atom_graph:
2084 if isinstance(node, Atom):
# Non-Atom nodes are (pkg, virt_atom) pairs for virtuals.
2089 pkg, virt_atom = node
2090 if virt_atom not in chosen_atoms:
2092 if not portage.match_from_list(virt_atom, [pkg]):
2093 # Typically this means that the atom
2094 # specifies USE deps that are unsatisfied
2095 # by the selected package. The caller will
2096 # record this as an unsatisfied dependency
2100 selected_atoms[pkg] = [atom for atom in \
2101 atom_graph.child_nodes(node) if atom in chosen_atoms]
2103 return selected_atoms
# NOTE(review): elided listing — initializers (`missing_use = []`,
# `changes = []`, `missing_iuse = []`, `msg = []`), `try/except` lines,
# `continue`/`break` statements and several print calls are missing
# between the embedded line numbers. Comments describe only the visible
# lines.
# Purpose: explain to the user why `atom` could not be satisfied — masked
# packages, missing/changed USE flags, or missing IUSE — and walk parent
# nodes to show what pulled the dependency in.
2105 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2106 check_backtrack=False):
2108 When check_backtrack=True, no output is produced and
2109 the method either returns or raises _backtrack_mask if
2110 a matching package has been masked by backtracking.
2112 backtrack_mask = False
2113 atom_set = InternalPackageSet(initial_atoms=(atom,))
2114 xinfo = '"%s"' % atom
2117 # Discard null/ from failed cpv_expand category expansion.
2118 xinfo = xinfo.replace("null/", "")
2119 masked_packages = []
2121 masked_pkg_instances = set()
2122 missing_licenses = []
2123 have_eapi_mask = False
2124 pkgsettings = self._frozen_config.pkgsettings[root]
2125 implicit_iuse = pkgsettings._get_implicit_iuse()
2126 root_config = self._frozen_config.roots[root]
2127 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2128 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Scan every candidate db for packages matching the atom (USE deps
# stripped) and classify why each one was rejected.
2129 for db, pkg_type, built, installed, db_keys in dbs:
2133 if hasattr(db, "xmatch"):
2134 cpv_list = db.xmatch("match-all", atom.without_use)
2136 cpv_list = db.match(atom.without_use)
2139 for cpv in cpv_list:
2140 metadata, mreasons = get_mask_info(root_config, cpv,
2141 pkgsettings, db, pkg_type, built, installed, db_keys)
2142 if metadata is not None:
2143 pkg = self._pkg(cpv, pkg_type, root_config,
2144 installed=installed)
2145 # pkg.metadata contains calculated USE for ebuilds,
2146 # required later for getMissingLicenses.
2147 metadata = pkg.metadata
2148 if pkg.cp != atom.cp:
2149 # A cpv can be returned from dbapi.match() as an
2150 # old-style virtual match even in cases when the
2151 # package does not actually PROVIDE the virtual.
2152 # Filter out any such false matches here.
2153 if not atom_set.findAtomForPackage(pkg):
# Packages masked by the backtracking logic get a synthetic
# "backtracking: ..." mask reason.
2155 if pkg in self._dynamic_config._runtime_pkg_mask:
2156 backtrack_reasons = \
2157 self._dynamic_config._runtime_pkg_mask[pkg]
2158 mreasons.append('backtracking: %s' % \
2159 ', '.join(sorted(backtrack_reasons)))
2160 backtrack_mask = True
2162 masked_pkg_instances.add(pkg)
2164 missing_use.append(pkg)
2167 masked_packages.append(
2168 (root_config, pkgsettings, cpv, metadata, mreasons))
# check_backtrack mode: signal via exception instead of printing.
2172 raise self._backtrack_mask()
# Work out, per candidate, which USE flags would need to change and
# which IUSE flags are missing entirely.
2176 missing_use_reasons = []
2177 missing_iuse_reasons = []
2178 for pkg in missing_use:
2179 use = pkg.use.enabled
2180 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2181 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2183 for x in atom.use.required:
2184 if iuse_re.match(x) is None:
2185 missing_iuse.append(x)
2188 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2189 missing_iuse_reasons.append((pkg, mreasons))
2191 need_enable = sorted(atom.use.enabled.difference(use))
2192 need_disable = sorted(atom.use.disabled.intersection(use))
2193 if need_enable or need_disable:
2195 changes.extend(colorize("red", "+" + x) \
2196 for x in need_enable)
2197 changes.extend(colorize("blue", "-" + x) \
2198 for x in need_disable)
2199 mreasons.append("Change USE: %s" % " ".join(changes))
2200 missing_use_reasons.append((pkg, mreasons))
# Only unmasked instances are useful in the USE-change suggestion.
2202 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2203 in missing_use_reasons if pkg not in masked_pkg_instances]
2205 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2206 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2208 show_missing_use = False
2209 if unmasked_use_reasons:
2210 # Only show the latest version.
2211 show_missing_use = unmasked_use_reasons[:1]
2212 elif unmasked_iuse_reasons:
2213 if missing_use_reasons:
2214 # All packages with required IUSE are masked,
2215 # so display a normal masking message.
2218 show_missing_use = unmasked_iuse_reasons
# Report: USE-flag problem, masked packages, or no ebuilds at all.
2222 if show_missing_use:
2223 print("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".")
2224 print("!!! One of the following packages is required to complete your request:")
2225 for pkg, mreasons in show_missing_use:
2226 print("- "+pkg.cpv+" ("+", ".join(mreasons)+")")
2228 elif masked_packages:
2230 colorize("BAD", "All ebuilds that could satisfy ") + \
2231 colorize("INFORM", xinfo) + \
2232 colorize("BAD", " have been masked."))
2233 print("!!! One of the following masked packages is required to complete your request:")
2234 have_eapi_mask = show_masked_packages(masked_packages)
# When something was masked only because of an unsupported EAPI, tell
# the user to upgrade portage itself.
2237 msg = ("The current version of portage supports " + \
2238 "EAPI '%s'. You must upgrade to a newer version" + \
2239 " of portage before EAPI masked packages can" + \
2240 " be installed.") % portage.const.EAPI
2241 from textwrap import wrap
2242 for line in wrap(msg, 75):
2247 print("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".")
2249 # Show parent nodes and the argument that pulled them in.
2250 traversed_nodes = set()
2253 while node is not None:
2254 traversed_nodes.add(node)
2255 msg.append('(dependency required by "%s" [%s])' % \
2256 (colorize('INFORM', str(node.cpv)), node.type_name))
2258 if node not in self._dynamic_config.digraph:
2259 # The parent is not in the graph due to backtracking.
2262 # When traversing to parents, prefer arguments over packages
2263 # since arguments are root nodes. Never traverse the same
2264 # package twice, in order to prevent an infinite loop.
2265 selected_parent = None
2266 for parent in self._dynamic_config.digraph.parent_nodes(node):
2267 if isinstance(parent, DependencyArg):
2268 msg.append('(dependency required by "%s" [argument])' % \
2269 (colorize('INFORM', str(parent))))
2270 selected_parent = None
2272 if parent not in traversed_nodes:
2273 selected_parent = parent
2274 node = selected_parent
# NOTE(review): elided listing — `else:` branches, `continue`/`break`
# lines, the `try:` before self._pkg, and the `yield pkg` at the end of
# the loop are missing between the embedded line numbers. This is a
# generator in the original file (it iterates matches) — the yield is
# simply not visible here.
2284 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2286 Iterate over Package instances of pkg_type matching the given atom.
2287 This does not check visibility and it also does not match USE for
2288 unbuilt ebuilds since USE are lazily calculated after visibility
2289 checks (to avoid the expense when possible).
2292 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
# xmatch("match-all", ...) is the porttree fast path; plain match()
# otherwise.
2294 if hasattr(db, "xmatch"):
2295 cpv_list = db.xmatch("match-all", atom)
2297 cpv_list = db.match(atom)
2299 # USE=multislot can make an installed package appear as if
2300 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2301 # won't do any good as long as USE=multislot is enabled since
2302 # the newly built package still won't have the expected slot.
2303 # Therefore, assume that such SLOT dependencies are already
2304 # satisfied rather than forcing a rebuild.
2305 installed = pkg_type == 'installed'
2306 if installed and not cpv_list and atom.slot:
2307 for cpv in db.match(atom.cp):
2308 slot_available = False
# Check whether any db in the filtered trees offers the wanted slot.
2309 for other_db, other_type, other_built, \
2310 other_installed, other_keys in \
2311 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2314 other_db.aux_get(cpv, ["SLOT"])[0]:
2315 slot_available = True
2319 if not slot_available:
2321 inst_pkg = self._pkg(cpv, "installed",
2322 root_config, installed=installed)
2323 # Remove the slot from the atom and verify that
2324 # the package matches the resulting atom.
2325 atom_without_slot = portage.dep.remove_slot(atom)
2327 atom_without_slot += str(atom.use)
2328 atom_without_slot = portage.dep.Atom(atom_without_slot)
2329 if portage.match_from_list(
2330 atom_without_slot, [inst_pkg]):
2331 cpv_list = [inst_pkg.cpv]
2338 for cpv in cpv_list:
2340 pkg = self._pkg(cpv, pkg_type, root_config,
2341 installed=installed, onlydeps=onlydeps)
# Missing/corrupt packages are skipped (the handler body is elided).
2342 except portage.exception.PackageNotFound:
2345 if pkg.cp != atom.cp:
2346 # A cpv can be returned from dbapi.match() as an
2347 # old-style virtual match even in cases when the
2348 # package does not actually PROVIDE the virtual.
2349 # Filter out any such false matches here.
2350 if not InternalPackageSet(initial_atoms=(atom,)
2351 ).findAtomForPackage(pkg):
# NOTE(review): elided listing — the cache-hit unpack/return, the final
# `return ret`, and the `pkg, existing = ret` unpack lines are missing
# between the embedded line numbers.
# Purpose: memoizing front-end for _select_pkg_highest_available_imp;
# also records visible packages in _visible_pkgs as a side effect.
2355 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2356 cache_key = (root, atom, onlydeps)
2357 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
# Cache hit with no graph node recorded: check whether the package has
# since been added to the graph and refresh the cached entry if so.
2360 if pkg and not existing:
2361 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2362 if existing and existing == pkg:
2363 # Update the cache to reflect that the
2364 # package has been added to the graph.
2366 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store the result.
2368 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2369 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Side effect: visible, non-keyword-masked selections are injected into
# the per-root visible-package dbapi.
2372 settings = pkg.root_config.settings
2373 if pkg.visible and not (pkg.installed and \
2374 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2375 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# NOTE(review): elided listing — many statements are missing between the
# embedded line numbers (`continue`/`break` lines, `try:` headers,
# `atom_cp = atom.cp`, the `reinstall = ...` flag, visibility checks,
# `myeb = ...` assignments, and the final `return None, None` for the
# empty case, among others). The visible statement order is preserved
# exactly; comments below are limited to what is shown.
# Purpose: core package-selection routine — scan all candidate dbs in
# type-preference order and return (best_match, existing_graph_node).
2378 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2379 root_config = self._frozen_config.roots[root]
2380 pkgsettings = self._frozen_config.pkgsettings[root]
2381 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2382 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2383 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2384 # List of acceptable packages, ordered by type preference.
2385 matched_packages = []
2386 highest_version = None
2387 if not isinstance(atom, portage.dep.Atom):
2388 atom = portage.dep.Atom(atom)
2390 atom_set = InternalPackageSet(initial_atoms=(atom,))
2391 existing_node = None
# Option/param flags that steer selection below.
2393 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2394 empty = "empty" in self._dynamic_config.myparams
2395 selective = "selective" in self._dynamic_config.myparams
2397 noreplace = "--noreplace" in self._frozen_config.myopts
2398 avoid_update = "--update" not in self._frozen_config.myopts
2399 use_ebuild_visibility = self._frozen_config.myopts.get(
2400 '--use-ebuild-visibility', 'n') != 'n'
2401 # Behavior of the "selective" parameter depends on
2402 # whether or not a package matches an argument atom.
2403 # If an installed package provides an old-style
2404 # virtual that is no longer provided by an available
2405 # package, the installed package may match an argument
2406 # atom even though none of the available packages do.
2407 # Therefore, "selective" logic does not consider
2408 # whether or not an installed package matches an
2409 # argument atom. It only considers whether or not
2410 # available packages match argument atoms, which is
2411 # represented by the found_available_arg flag.
2412 found_available_arg = False
# Two passes: first look for an existing graph node, then do a normal
# selection pass.
2413 for find_existing_node in True, False:
2416 for db, pkg_type, built, installed, db_keys in dbs:
2419 if installed and not find_existing_node:
2420 want_reinstall = reinstall or empty or \
2421 (found_available_arg and not selective)
2422 if want_reinstall and matched_packages:
2425 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom,
2427 if pkg in self._dynamic_config._runtime_pkg_mask:
2428 # The package has been masked by the backtracking logic
2431 # Make --noreplace take precedence over --newuse.
2432 if not pkg.installed and noreplace and \
2433 cpv in vardb.match(atom):
2434 # If the installed version is masked, it may
2435 # be necessary to look at lower versions,
2436 # in case there is a visible downgrade.
2438 reinstall_for_flags = None
2440 if not pkg.installed or \
2441 (matched_packages and not avoid_update):
2442 # Only enforce visibility on installed packages
2443 # if there is at least one other visible package
2444 # available. By filtering installed masked packages
2445 # here, packages that have been masked since they
2446 # were installed can be automatically downgraded
2447 # to an unmasked version.
2452 # Enable upgrade or downgrade to a version
2453 # with visible KEYWORDS when the installed
2454 # version is masked by KEYWORDS, but never
2455 # reinstall the same exact version only due
2456 # to a KEYWORDS mask. See bug #252167.
2457 if matched_packages:
2459 different_version = None
2460 for avail_pkg in matched_packages:
2461 if not portage.dep.cpvequal(
2462 pkg.cpv, avail_pkg.cpv):
2463 different_version = avail_pkg
2465 if different_version is not None:
2466 # If the ebuild no longer exists or it's
2467 # keywords have been dropped, reject built
2468 # instances (installed or binary).
2469 # If --usepkgonly is enabled, assume that
2470 # the ebuild status should be ignored.
2471 if not use_ebuild_visibility and usepkgonly:
2473 pkgsettings._getMissingKeywords(
2474 pkg.cpv, pkg.metadata):
# Otherwise require a visible corresponding ebuild (lookup/try lines
# elided).
2479 pkg.cpv, "ebuild", root_config)
2480 except portage.exception.PackageNotFound:
2483 if not pkg_eb.visible:
2486 # Calculation of USE for unbuilt ebuilds is relatively
2487 # expensive, so it is only performed lazily, after the
2488 # above visibility checks are complete.
# Track whether an available (non-installed) package matches an
# argument atom — feeds the "selective" logic documented above.
2491 if root == self._frozen_config.target_root:
2493 myarg = next(self._iter_atoms_for_pkg(pkg))
2494 except StopIteration:
2496 except portage.exception.InvalidDependString:
2498 # masked by corruption
2500 if not installed and myarg:
2501 found_available_arg = True
# Enforce the atom's USE deps against unbuilt packages.
2503 if atom.use and not pkg.built:
2504 use = pkg.use.enabled
2505 if atom.use.enabled.difference(use):
2507 if atom.use.disabled.intersection(use):
2509 if pkg.cp == atom_cp:
2510 if highest_version is None:
2511 highest_version = pkg
2512 elif pkg > highest_version:
2513 highest_version = pkg
2514 # At this point, we've found the highest visible
2515 # match from the current repo. Any lower versions
2516 # from this repo are ignored, so this so the loop
2517 # will always end with a break statement below
2519 if find_existing_node:
2520 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2523 # Use PackageSet.findAtomForPackage()
2524 # for PROVIDE support.
2525 if atom_set.findAtomForPackage(e_pkg):
2526 if highest_version and \
2527 e_pkg.cp == atom_cp and \
2528 e_pkg < highest_version and \
2529 e_pkg.slot_atom != highest_version.slot_atom:
2530 # There is a higher version available in a
2531 # different slot, so this existing node is
2535 matched_packages.append(e_pkg)
2536 existing_node = e_pkg
2538 # Compare built package to current config and
2539 # reject the built package if necessary.
2540 if built and not installed and \
2541 ("--newuse" in self._frozen_config.myopts or \
2542 "--reinstall" in self._frozen_config.myopts or \
2543 "--binpkg-respect-use" in self._frozen_config.myopts):
2544 iuses = pkg.iuse.all
2545 old_use = pkg.use.enabled
2547 pkgsettings.setcpv(myeb)
2549 pkgsettings.setcpv(pkg)
2550 now_use = pkgsettings["PORTAGE_USE"].split()
2551 forced_flags = set()
2552 forced_flags.update(pkgsettings.useforce)
2553 forced_flags.update(pkgsettings.usemask)
2555 if myeb and not usepkgonly:
2556 cur_iuse = myeb.iuse.all
2557 if self._reinstall_for_flags(forced_flags,
2561 # Compare current config to installed package
2562 # and do not reinstall if possible.
2563 if not installed and \
2564 ("--newuse" in self._frozen_config.myopts or \
2565 "--reinstall" in self._frozen_config.myopts) and \
2566 cpv in vardb.match(atom):
2567 pkgsettings.setcpv(pkg)
2568 forced_flags = set()
2569 forced_flags.update(pkgsettings.useforce)
2570 forced_flags.update(pkgsettings.usemask)
2571 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
2572 old_iuse = set(filter_iuse_defaults(
2573 vardb.aux_get(cpv, ["IUSE"])[0].split()))
2574 cur_use = pkg.use.enabled
2575 cur_iuse = pkg.iuse.all
2576 reinstall_for_flags = \
2577 self._reinstall_for_flags(
2578 forced_flags, old_use, old_iuse,
2580 if reinstall_for_flags:
# Accepted candidate; remember any USE-flag reinstall reasons.
2584 matched_packages.append(pkg)
2585 if reinstall_for_flags:
2586 self._dynamic_config._reinstall_nodes[pkg] = \
2590 if not matched_packages:
2593 if "--debug" in self._frozen_config.myopts:
2594 for pkg in matched_packages:
2595 portage.writemsg("%s %s\n" % \
2596 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
2598 # Filter out any old-style virtual matches if they are
2599 # mixed with new-style virtual matches.
2601 if len(matched_packages) > 1 and \
2602 "virtual" == portage.catsplit(cp)[0]:
2603 for pkg in matched_packages:
2606 # Got a new-style virtual, so filter
2607 # out any old-style virtuals.
2608 matched_packages = [pkg for pkg in matched_packages \
# Tie-breaking among multiple acceptable matches: prefer an existing
# graph node, then the best (highest) cpv; type preference decides last.
2612 if len(matched_packages) > 1:
2614 if existing_node is not None:
2615 return existing_node, existing_node
2616 for pkg in matched_packages:
2618 return pkg, existing_node
2620 bestmatch = portage.best(
2621 [pkg.cpv for pkg in matched_packages])
2622 matched_packages = [pkg for pkg in matched_packages \
2623 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
2625 # ordered by type preference ("ebuild" type is the last resort)
2626 return matched_packages[-1], existing_node
# NOTE(review): elided listing — the empty-match early return (and the
# docstring closing lines) are missing between the embedded line numbers.
# Purpose: graph-restricted counterpart of _select_pkg_highest_available,
# used while completing the graph; returns (pkg, existing_graph_node).
2628 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
2630 Select packages that have already been added to the graph or
2631 those that are installed and have not been scheduled for
2634 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
2635 matches = graph_db.match_pkgs(atom)
2638 pkg = matches[-1] # highest match
2639 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2640 return pkg, in_graph
# NOTE(review): elided listing — early `return 1` lines, the `args = []`
# initializer, `continue`/`break` lines, and the restoration of
# _select_atoms/_select_package at the end are missing between the
# embedded line numbers.
2642 def _complete_graph(self, required_sets=None):
2644 Add any deep dependencies of required sets (args, system, world) that
2645 have not been pulled into the graph yet. This ensures that the graph
2646 is consistent such that initially satisfied deep dependencies are not
2647 broken in the new graph. Initially unsatisfied dependencies are
2648 irrelevant since we only want to avoid breaking dependencies that are
2651 Since this method can consume enough time to disturb users, it is
2652 currently only enabled by the --complete-graph option.
2654 @param required_sets: contains required sets (currently only used
2655 for depclean and prune removal operations)
2656 @type required_sets: dict
2658 if "--buildpkgonly" in self._frozen_config.myopts or \
2659 "recurse" not in self._dynamic_config.myparams:
2662 if "complete" not in self._dynamic_config.myparams:
2663 # Skip this to avoid consuming enough time to disturb users.
2668 # Put the depgraph into a mode that causes it to only
2669 # select packages that have already been added to the
2670 # graph or those that are installed and have not been
2671 # scheduled for replacement. Also, toggle the "deep"
2672 # parameter so that all dependencies are traversed and
2674 self._select_atoms = self._select_atoms_from_graph
2675 self._select_package = self._select_pkg_from_graph
2676 already_deep = self._dynamic_config.myparams.get("deep") is True
2677 if not already_deep:
2678 self._dynamic_config.myparams["deep"] = True
2680 for root in self._frozen_config.roots:
2681 if root != self._frozen_config.target_root and \
2682 "remove" in self._dynamic_config.myparams:
2683 # Only pull in deps for the relevant root.
2685 if required_sets is None or root not in required_sets:
2686 required_set_names = self._frozen_config._required_set_names.copy()
2688 required_set_names = set(required_sets[root])
# Skip sets already fully covered when a deep/empty pass was done.
2689 if root == self._frozen_config.target_root and \
2690 (already_deep or "empty" in self._dynamic_config.myparams):
2691 required_set_names.difference_update(self._dynamic_config._sets)
2692 if not required_set_names and \
2693 not self._dynamic_config._ignored_deps and \
2694 not self._dynamic_config._dep_stack:
2696 root_config = self._frozen_config.roots[root]
2697 setconfig = root_config.setconfig
2699 # Reuse existing SetArg instances when available.
2700 for arg in self._dynamic_config.digraph.root_nodes():
2701 if not isinstance(arg, SetArg):
2703 if arg.root_config != root_config:
2705 if arg.name in required_set_names:
2707 required_set_names.remove(arg.name)
2708 # Create new SetArg instances only when necessary.
2709 for s in required_set_names:
2710 if required_sets is None or root not in required_sets:
2711 expanded_set = InternalPackageSet(
2712 initial_atoms=setconfig.getSetAtoms(s))
2714 expanded_set = required_sets[root][s]
2715 atom = SETPREFIX + s
2716 args.append(SetArg(arg=atom, set=expanded_set,
2717 root_config=root_config))
2718 if root == self._frozen_config.target_root:
2719 self._dynamic_config._sets[s] = expanded_set
# Queue every atom of every set argument for dependency traversal.
2720 vardb = root_config.trees["vartree"].dbapi
2722 for atom in arg.set:
2723 self._dynamic_config._dep_stack.append(
2724 Dependency(atom=atom, root=root, parent=arg))
2725 if self._dynamic_config._ignored_deps:
2726 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
2727 self._dynamic_config._ignored_deps = []
2728 if not self._create_graph(allow_unsatisfied=True):
2730 # Check the unsatisfied deps to see if any initially satisfied deps
2731 # will become unsatisfied due to an upgrade. Initially unsatisfied
2732 # deps are irrelevant since we only want to avoid breaking deps
2733 # that are initially satisfied.
2734 while self._dynamic_config._unsatisfied_deps:
2735 dep = self._dynamic_config._unsatisfied_deps.pop()
2736 matches = vardb.match_pkgs(dep.atom)
2738 self._dynamic_config._initially_unsatisfied_deps.append(dep)
2740 # An scheduled installation broke a deep dependency.
2741 # Add the installed package to the graph so that it
2742 # will be appropriately reported as a slot collision
2743 # (possibly solvable via backtracking).
2744 pkg = matches[-1] # highest match
2745 if not self._add_pkg(pkg, dep):
2747 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): elided listing — the signature's trailing parameters
# (presumably `onlydeps=False`), the `operation = "merge"` default, the
# `if pkg is None:` cache-miss guard, the `try:` before aux_get, and the
# final `return pkg` are missing between the embedded line numbers.
2751 def _pkg(self, cpv, type_name, root_config, installed=False,
2754 Get a package instance from the cache, or create a new
2755 one if necessary. Raises PackageNotFound from aux_get if it
2756 failures for some reason (package does not exist or is
# Installed / deps-only instances are keyed as "nomerge" operations.
2760 if installed or onlydeps:
2761 operation = "nomerge"
2762 pkg = self._frozen_config._pkg_cache.get(
2763 (type_name, root_config.root, cpv, operation))
2764 if pkg is None and onlydeps and not installed:
2765 # Maybe it already got pulled in as a "merge" node.
2766 pkg = self._dynamic_config.mydbapi[root_config.root].get(
2767 (type_name, root_config.root, cpv, 'merge'))
# Cache miss: build a Package from the backing dbapi's metadata.
2770 tree_type = self.pkg_tree_map[type_name]
2771 db = root_config.trees[tree_type].dbapi
2772 db_keys = list(self._frozen_config._trees_orig[root_config.root][
2773 tree_type].dbapi._aux_cache_keys)
2775 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
2777 raise portage.exception.PackageNotFound(cpv)
2778 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
2779 installed=installed, metadata=metadata, onlydeps=onlydeps,
2780 root_config=root_config, type_name=type_name)
2781 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked *only* by LICENSE,
# so ACCEPT_LICENSE advice can be shown later.
2783 if not pkg.visible and \
2784 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
2785 slot_key = (pkg.root, pkg.slot_atom)
2786 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
2787 if other_pkg is None or pkg > other_pkg:
2788 self._frozen_config._highest_license_masked[slot_key] = pkg
2792 def _validate_blockers(self):
2793 """Remove any blockers from the digraph that do not match any of the
2794 packages within the graph. If necessary, create hard deps to ensure
2795 correct merge order such that mutually blocking packages are never
2796 installed simultaneously."""
2798 if "--buildpkgonly" in self._frozen_config.myopts or \
2799 "--nodeps" in self._frozen_config.myopts:
2802 complete = "complete" in self._dynamic_config.myparams
2803 deep = "deep" in self._dynamic_config.myparams
2806 # Pull in blockers from all installed packages that haven't already
2807 # been pulled into the depgraph. This is not enabled by default
2808 # due to the performance penalty that is incurred by all the
2809 # additional dep_check calls that are required.
2811 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
2812 for myroot in self._frozen_config.trees:
2813 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
2814 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
2815 pkgsettings = self._frozen_config.pkgsettings[myroot]
2816 root_config = self._frozen_config.roots[myroot]
2817 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
2818 final_db = self._dynamic_config.mydbapi[myroot]
2820 blocker_cache = BlockerCache(myroot, vardb)
2821 stale_cache = set(blocker_cache)
2824 stale_cache.discard(cpv)
2825 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
2827 # Check for masked installed packages. Only warn about
2828 # packages that are in the graph in order to avoid warning
2829 # about those that will be automatically uninstalled during
2830 # the merge process or by --depclean. Always warn about
2831 # packages masked by license, since the user likely wants
2832 # to adjust ACCEPT_LICENSE.
2834 if not pkg.visible and \
2835 (pkg_in_graph or 'LICENSE' in pkg.masks):
2836 self._dynamic_config._masked_installed.add(pkg)
2838 self._check_masks(pkg)
2840 blocker_atoms = None
2846 self._dynamic_config._blocker_parents.child_nodes(pkg))
2851 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
2854 if blockers is not None:
2855 blockers = set(blocker.atom for blocker in blockers)
2857 # If this node has any blockers, create a "nomerge"
2858 # node for it so that they can be enforced.
2859 self._spinner_update()
2860 blocker_data = blocker_cache.get(cpv)
2861 if blocker_data is not None and \
2862 blocker_data.counter != long(pkg.metadata["COUNTER"]):
2865 # If blocker data from the graph is available, use
2866 # it to validate the cache and update the cache if
2868 if blocker_data is not None and \
2869 blockers is not None:
2870 if not blockers.symmetric_difference(
2871 blocker_data.atoms):
2875 if blocker_data is None and \
2876 blockers is not None:
2877 # Re-use the blockers from the graph.
2878 blocker_atoms = sorted(blockers)
2879 counter = long(pkg.metadata["COUNTER"])
2881 blocker_cache.BlockerData(counter, blocker_atoms)
2882 blocker_cache[pkg.cpv] = blocker_data
2886 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
2888 # Use aux_get() to trigger FakeVartree global
2889 # updates on *DEPEND when appropriate.
2890 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2891 # It is crucial to pass in final_db here in order to
2892 # optimize dep_check calls by eliminating atoms via
2893 # dep_wordreduce and dep_eval calls.
2895 portage.dep._dep_check_strict = False
2897 success, atoms = portage.dep_check(depstr,
2898 final_db, pkgsettings, myuse=pkg.use.enabled,
2899 trees=self._dynamic_config._graph_trees, myroot=myroot)
2900 except Exception as e:
2901 if isinstance(e, SystemExit):
2903 # This is helpful, for example, if a ValueError
2904 # is thrown from cpv_expand due to multiple
2905 # matches (this can happen if an atom lacks a
2907 show_invalid_depstring_notice(
2908 pkg, depstr, str(e))
2912 portage.dep._dep_check_strict = True
2914 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2915 if replacement_pkg and \
2916 replacement_pkg[0].operation == "merge":
2917 # This package is being replaced anyway, so
2918 # ignore invalid dependencies so as not to
2919 # annoy the user too much (otherwise they'd be
2920 # forced to manually unmerge it first).
2922 show_invalid_depstring_notice(pkg, depstr, atoms)
2924 blocker_atoms = [myatom for myatom in atoms \
2926 blocker_atoms.sort()
2927 counter = long(pkg.metadata["COUNTER"])
2928 blocker_cache[cpv] = \
2929 blocker_cache.BlockerData(counter, blocker_atoms)
2932 for atom in blocker_atoms:
2933 blocker = Blocker(atom=atom,
2934 eapi=pkg.metadata["EAPI"], root=myroot)
2935 self._dynamic_config._blocker_parents.add(blocker, pkg)
2936 except portage.exception.InvalidAtom as e:
2937 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2938 show_invalid_depstring_notice(
2939 pkg, depstr, "Invalid Atom: %s" % (e,))
2941 for cpv in stale_cache:
2942 del blocker_cache[cpv]
2943 blocker_cache.flush()
2946 # Discard any "uninstall" tasks scheduled by previous calls
2947 # to this method, since those tasks may not make sense given
2948 # the current graph state.
2949 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
2950 if previous_uninstall_tasks:
2951 self._dynamic_config._blocker_uninstalls = digraph()
2952 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
2954 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
2955 self._spinner_update()
2956 root_config = self._frozen_config.roots[blocker.root]
2957 virtuals = root_config.settings.getvirtuals()
2958 myroot = blocker.root
2959 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
2960 final_db = self._dynamic_config.mydbapi[myroot]
2962 provider_virtual = False
2963 if blocker.cp in virtuals and \
2964 not self._have_new_virt(blocker.root, blocker.cp):
2965 provider_virtual = True
2967 # Use this to check PROVIDE for each matched package
2969 atom_set = InternalPackageSet(
2970 initial_atoms=[blocker.atom])
2972 if provider_virtual:
2974 for provider_entry in virtuals[blocker.cp]:
2976 portage.dep_getkey(provider_entry)
2977 atoms.append(Atom(blocker.atom.replace(
2978 blocker.cp, provider_cp)))
2980 atoms = [blocker.atom]
2982 blocked_initial = set()
2984 for pkg in initial_db.match_pkgs(atom):
2985 if atom_set.findAtomForPackage(pkg):
2986 blocked_initial.add(pkg)
2988 blocked_final = set()
2990 for pkg in final_db.match_pkgs(atom):
2991 if atom_set.findAtomForPackage(pkg):
2992 blocked_final.add(pkg)
2994 if not blocked_initial and not blocked_final:
2995 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
2996 self._dynamic_config._blocker_parents.remove(blocker)
2997 # Discard any parents that don't have any more blockers.
2998 for pkg in parent_pkgs:
2999 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3000 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3001 self._dynamic_config._blocker_parents.remove(pkg)
3003 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3004 unresolved_blocks = False
3005 depends_on_order = set()
3006 for pkg in blocked_initial:
3007 if pkg.slot_atom == parent.slot_atom and \
3008 not blocker.atom.blocker.overlap.forbid:
3009 # New !!atom blockers do not allow temporary
3010 # simulaneous installation, so unlike !atom
3011 # blockers, !!atom blockers aren't ignored
3012 # when they match other packages occupying
3015 if parent.installed:
3016 # Two currently installed packages conflict with
3017 # eachother. Ignore this case since the damage
3018 # is already done and this would be likely to
3019 # confuse users if displayed like a normal blocker.
3022 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3024 if parent.operation == "merge":
3025 # Maybe the blocked package can be replaced or simply
3026 # unmerged to resolve this block.
3027 depends_on_order.add((pkg, parent))
3029 # None of the above blocker resolutions techniques apply,
3030 # so apparently this one is unresolvable.
3031 unresolved_blocks = True
3032 for pkg in blocked_final:
3033 if pkg.slot_atom == parent.slot_atom and \
3034 not blocker.atom.blocker.overlap.forbid:
3035 # New !!atom blockers do not allow temporary
3036 # simulaneous installation, so unlike !atom
3037 # blockers, !!atom blockers aren't ignored
3038 # when they match other packages occupying
3041 if parent.operation == "nomerge" and \
3042 pkg.operation == "nomerge":
3043 # This blocker will be handled the next time that a
3044 # merge of either package is triggered.
3047 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3049 # Maybe the blocking package can be
3050 # unmerged to resolve this block.
3051 if parent.operation == "merge" and pkg.installed:
3052 depends_on_order.add((pkg, parent))
3054 elif parent.operation == "nomerge":
3055 depends_on_order.add((parent, pkg))
3057 # None of the above blocker resolutions techniques apply,
3058 # so apparently this one is unresolvable.
3059 unresolved_blocks = True
3061 # Make sure we don't unmerge any package that have been pulled
3063 if not unresolved_blocks and depends_on_order:
3064 for inst_pkg, inst_task in depends_on_order:
3065 if self._dynamic_config.digraph.contains(inst_pkg) and \
3066 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3067 unresolved_blocks = True
3070 if not unresolved_blocks and depends_on_order:
3071 for inst_pkg, inst_task in depends_on_order:
3072 uninst_task = Package(built=inst_pkg.built,
3073 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3074 metadata=inst_pkg.metadata,
3075 operation="uninstall",
3076 root_config=inst_pkg.root_config,
3077 type_name=inst_pkg.type_name)
3078 # Enforce correct merge order with a hard dep.
3079 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3080 priority=BlockerDepPriority.instance)
3081 # Count references to this blocker so that it can be
3082 # invalidated after nodes referencing it have been
3084 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3085 if not unresolved_blocks and not depends_on_order:
3086 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3087 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3088 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3089 self._dynamic_config._blocker_parents.remove(blocker)
3090 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3091 self._dynamic_config._blocker_parents.remove(parent)
3092 if unresolved_blocks:
3093 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
3097 def _accept_blocker_conflicts(self):
3099 for x in ("--buildpkgonly", "--fetchonly",
3100 "--fetch-all-uri", "--nodeps"):
3101 if x in self._frozen_config.myopts:
def _merge_order_bias(self, mygraph):
    """
    For optimal leaf node selection, promote deep system runtime deps and
    order nodes from highest to lowest overall reference count.

    Sorts mygraph.order in place: uninstall operations compare greater
    (sort last), deep system runtime deps compare less (sort first), and
    remaining ties are broken by descending parent-reference count.

    NOTE(review): node_info initialization and the comparator's return
    statements were missing from this copy; restored here.
    """
    node_info = {}
    for node in mygraph.order:
        node_info[node] = len(mygraph.parent_nodes(node))
    deep_system_deps = _find_deep_system_runtime_deps(mygraph)

    def cmp_merge_preference(node1, node2):
        # Uninstall tasks are deferred as long as possible.
        if node1.operation == 'uninstall':
            if node2.operation == 'uninstall':
                return 0
            return 1

        if node2.operation == 'uninstall':
            if node1.operation == 'uninstall':
                return 0
            return -1

        # Deep system runtime deps are merged as early as possible.
        node1_sys = node1 in deep_system_deps
        node2_sys = node2 in deep_system_deps
        if node1_sys != node2_sys:
            if node1_sys:
                return -1
            return 1

        # Higher overall reference count merges earlier.
        return node_info[node2] - node_info[node1]

    mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
    """
    Return the serialized merge list, computing and caching it on first
    use.

    @param reversed: if True, return the list in reverse order. (The
        parameter name shadows the builtin; kept unchanged for backward
        compatibility with existing callers.)
    @return: a copy of the cached task list.

    NOTE(review): the try/except wrapper and the tail of this method
    (reverse handling and return) were missing from this copy; restored.
    """
    while self._dynamic_config._serialized_tasks_cache is None:
        self._resolve_conflicts()
        try:
            self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
                self._serialize_tasks()
        except self._serialize_tasks_retry:
            # _serialize_tasks adjusted its parameters (e.g. enabled
            # "complete" mode); loop and try again.
            pass
    retlist = self._dynamic_config._serialized_tasks_cache[:]
    if reversed:
        retlist.reverse()
    return retlist
def schedulerGraph(self):
    """
    The scheduler graph is identical to the normal one except that
    uninstall edges are reversed in specific cases that require
    conflicting packages to be temporarily installed simultaneously.
    This is intended for use by the Scheduler in it's parallelization
    logic. It ensures that temporary simultaneous installation of
    conflicting packages is avoided when appropriate (especially for
    !!atom blockers), but allowed in specific cases that require it.

    Note that this method calls break_refs() which alters the state of
    internal Package instances such that this depgraph instance should
    not be used to perform any more calculations.

    NOTE(review): the docstring delimiters and the altlist()/return
    statements were missing from this copy; restored here.
    """
    if self._dynamic_config._scheduler_graph is None:
        # altlist() populates _scheduler_graph as a side effect.
        self.altlist()
    self.break_refs(self._dynamic_config._scheduler_graph.order)
    return self._dynamic_config._scheduler_graph
def break_refs(self, nodes):
    """
    Take a mergelist like that returned from self.altlist() and
    break any references that lead back to the depgraph. This is
    useful if you want to hold references to packages without
    also holding the depgraph on the heap.

    @param nodes: iterable of tasks; only items with a root_config
        attribute are touched, others pass through untouched.

    NOTE(review): the "for node in nodes:" loop header and docstring
    delimiters were missing from this copy; restored here.
    """
    for node in nodes:
        if hasattr(node, "root_config"):
            # The FakeVartree references the _package_cache which
            # references the depgraph. So that Package instances don't
            # hold the depgraph and FakeVartree on the heap, replace
            # the RootConfig that references the FakeVartree with the
            # original RootConfig instance which references the actual
            # vartree.
            node.root_config = \
                self._frozen_config._trees_orig[node.root_config.root]["root_config"]
3192 def _resolve_conflicts(self):
3193 if not self._complete_graph():
3194 raise self._unknown_internal_error()
3196 if not self._validate_blockers():
3197 raise self._unknown_internal_error()
3199 if self._dynamic_config._slot_collision_info:
3200 self._process_slot_conflicts()
def _serialize_tasks(self):
    """
    Flatten the dependency digraph into an ordered merge list plus a
    scheduler graph.

    Returns a tuple (retlist, scheduler_graph): retlist is the ordered
    list of Package/Blocker tasks; scheduler_graph is a digraph copy in
    which uninstall edges have been adjusted for the Scheduler.

    NOTE(review): this copy of the method has lost many physical lines
    (loop headers, "else:" branches, "break"/"continue"/"return"
    statements, and initializations such as retlist, asap_nodes,
    prefer_asap, uninst_task, parent_deps and several "try:" headers).
    The indentation below is a best-effort reconstruction only; the
    missing statements must be restored from upstream before this code
    can run.
    """
    if "--debug" in self._frozen_config.myopts:
        writemsg("\ndigraph:\n\n", noiselevel=-1)
        self._dynamic_config.digraph.debug_print()
        writemsg("\n", noiselevel=-1)

    scheduler_graph = self._dynamic_config.digraph.copy()

    if '--nodeps' in self._frozen_config.myopts:
        # Preserve the package order given on the command line.
        return ([node for node in scheduler_graph \
            if isinstance(node, Package) \
            and node.operation == 'merge'], scheduler_graph)

    mygraph=self._dynamic_config.digraph.copy()
    # Prune "nomerge" root nodes if nothing depends on them, since
    # otherwise they slow down merge order calculation. Don't remove
    # non-root nodes since they help optimize merge order in some cases
    # such as revdep-rebuild.
    removed_nodes = set()
    # NOTE(review): the enclosing pruning loop header is missing here.
    for node in mygraph.root_nodes():
        if not isinstance(node, Package) or \
            node.installed or node.onlydeps:
            removed_nodes.add(node)
    self._spinner_update()
    mygraph.difference_update(removed_nodes)
    if not removed_nodes:
    removed_nodes.clear()
    self._merge_order_bias(mygraph)

    def cmp_circular_bias(n1, n2):
        """
        RDEPEND is stronger than PDEPEND and this function
        measures such a strength bias within a circular
        dependency relationship.
        """
        n1_n2_medium = n2 in mygraph.child_nodes(n1,
            ignore_priority=priority_range.ignore_medium_soft)
        n2_n1_medium = n1 in mygraph.child_nodes(n2,
            ignore_priority=priority_range.ignore_medium_soft)
        if n1_n2_medium == n2_n1_medium:

    myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
    # Contains uninstall tasks that have been scheduled to
    # occur after overlapping blockers have been installed.
    scheduled_uninstalls = set()
    # Contains any Uninstall tasks that have been ignored
    # in order to avoid the circular deps code path. These
    # correspond to blocker conflicts that could not be
    # resolved.
    ignored_uninstall_tasks = set()
    have_uninstall_task = False
    complete = "complete" in self._dynamic_config.myparams

    def get_nodes(**kwargs):
        """
        Returns leaf nodes excluding Uninstall instances
        since those should be executed as late as possible.
        """
        return [node for node in mygraph.leaf_nodes(**kwargs) \
            if isinstance(node, Package) and \
                (node.operation != "uninstall" or \
                node in scheduled_uninstalls)]

    # sys-apps/portage needs special treatment if ROOT="/"
    running_root = self._frozen_config._running_root.root
    from portage.const import PORTAGE_PACKAGE_ATOM
    runtime_deps = InternalPackageSet(
        initial_atoms=[PORTAGE_PACKAGE_ATOM])
    running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
        PORTAGE_PACKAGE_ATOM)
    replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
        PORTAGE_PACKAGE_ATOM)

    # NOTE(review): the if/else branch headers selecting between the
    # matched package and None are missing around these assignments.
    running_portage = running_portage[0]
    running_portage = None

    if replacement_portage:
        replacement_portage = replacement_portage[0]
    replacement_portage = None

    if replacement_portage == running_portage:
        replacement_portage = None

    if replacement_portage is not None:
        # update from running_portage to replacement_portage asap
        asap_nodes.append(replacement_portage)

    if running_portage is not None:
        # NOTE(review): the "try:" header is missing before this call.
        portage_rdepend = self._select_atoms_highest_available(
            running_root, running_portage.metadata["RDEPEND"],
            myuse=running_portage.use.enabled,
            parent=running_portage, strict=False)
        except portage.exception.InvalidDependString as e:
            portage.writemsg("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (running_root, running_portage.cpv, e), noiselevel=-1)
            portage_rdepend = {running_portage : []}
        for atoms in portage_rdepend.values():
            runtime_deps.update(atom for atom in atoms \
                if not atom.blocker)

    def gather_deps(ignore_priority, mergeable_nodes,
        selected_nodes, node):
        """
        Recursively gather a group of nodes that RDEPEND on
        eachother. This ensures that they are merged as a group
        and get their RDEPENDs satisfied as soon as possible.
        """
        if node in selected_nodes:
        if node not in mergeable_nodes:
        if node == replacement_portage and \
            mygraph.child_nodes(node,
            ignore_priority=priority_range.ignore_medium_soft):
            # Make sure that portage always has all of it's
            # RDEPENDs installed first.
        selected_nodes.add(node)
        for child in mygraph.child_nodes(node,
            ignore_priority=ignore_priority):
            if not gather_deps(ignore_priority,
                mergeable_nodes, selected_nodes, child):

    def ignore_uninst_or_med(priority):
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium(priority)

    def ignore_uninst_or_med_soft(priority):
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium_soft(priority)

    tree_mode = "--tree" in self._frozen_config.myopts
    # Tracks whether or not the current iteration should prefer asap_nodes
    # if available. This is set to False when the previous iteration
    # failed to select any nodes. It is reset whenever nodes are
    # successfully selected.

    # Controls whether or not the current iteration should drop edges that
    # are "satisfied" by installed packages, in order to solve circular
    # dependencies. The deep runtime dependencies of installed packages are
    # not checked in this case (bug #199856), so it must be avoided
    # whenever possible.
    drop_satisfied = False

    # State of variables for successive iterations that loosen the
    # criteria for node selection.
    #
    # iteration prefer_asap drop_satisfied
    #
    # If no nodes are selected on the last iteration, it is due to
    # unresolved blockers or circular dependencies.

    while not mygraph.empty():
        self._spinner_update()
        selected_nodes = None
        ignore_priority = None
        if drop_satisfied or (prefer_asap and asap_nodes):
            priority_range = DepPrioritySatisfiedRange
        priority_range = DepPriorityNormalRange
        if prefer_asap and asap_nodes:
            # ASAP nodes are merged before their soft deps. Go ahead and
            # select root nodes here if necessary, since it's typical for
            # the parent to have been removed from the graph already.
            asap_nodes = [node for node in asap_nodes \
                if mygraph.contains(node)]
            for node in asap_nodes:
                if not mygraph.child_nodes(node,
                    ignore_priority=priority_range.ignore_soft):
                    selected_nodes = [node]
                    asap_nodes.remove(node)

        if not selected_nodes and \
            not (prefer_asap and asap_nodes):
            for i in range(priority_range.NONE,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                nodes = get_nodes(ignore_priority=ignore_priority)
                # If there is a mixture of merges and uninstalls,
                # do the uninstalls first.
                good_uninstalls = []
                if node.operation == "uninstall":
                    good_uninstalls.append(node)
                nodes = good_uninstalls
                if ignore_priority is None and not tree_mode:
                    # Greedily pop all of these nodes since no
                    # relationship has been ignored. This optimization
                    # destroys --tree output, so it's disabled in tree
                    selected_nodes = nodes
                # For optimal merge order:
                # * Only pop one node.
                # * Removing a root node (node without a parent)
                # will not produce a leaf node, so avoid it.
                # * It's normal for a selected uninstall to be a
                # root node, so don't check them for parents.
                if node.operation == "uninstall" or \
                    mygraph.parent_nodes(node):
                    selected_nodes = [node]

        if not selected_nodes:
            nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
            mergeable_nodes = set(nodes)
            if prefer_asap and asap_nodes:
            for i in range(priority_range.SOFT,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                if not mygraph.parent_nodes(node):
                selected_nodes = set()
                if gather_deps(ignore_priority,
                    mergeable_nodes, selected_nodes, node):
                selected_nodes = None
            if prefer_asap and asap_nodes and not selected_nodes:
                # We failed to find any asap nodes to merge, so ignore
                # them for the next iteration.

        if selected_nodes and ignore_priority is not None:
            # Try to merge ignored medium_soft deps as soon as possible
            # if they're not satisfied by installed packages.
            for node in selected_nodes:
                children = set(mygraph.child_nodes(node))
                soft = children.difference(
                    mygraph.child_nodes(node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
                medium_soft = children.difference(
                    mygraph.child_nodes(node,
                    DepPrioritySatisfiedRange.ignore_medium_soft))
                medium_soft.difference_update(soft)
                for child in medium_soft:
                    if child in selected_nodes:
                    if child in asap_nodes:
                    asap_nodes.append(child)

        if selected_nodes and len(selected_nodes) > 1:
            if not isinstance(selected_nodes, list):
                selected_nodes = list(selected_nodes)
            selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # An Uninstall task needs to be executed in order to
            # avoid conflict if possible.
            priority_range = DepPrioritySatisfiedRange
            priority_range = DepPriorityNormalRange
            mergeable_nodes = get_nodes(
                ignore_priority=ignore_uninst_or_med)
            min_parent_deps = None
            for task in myblocker_uninstalls.leaf_nodes():
                # Do some sanity checks so that system or world packages
                # don't get uninstalled inappropriately here (only really
                # necessary when --complete-graph has not been enabled).
                if task in ignored_uninstall_tasks:
                if task in scheduled_uninstalls:
                    # It's been scheduled but it hasn't
                    # been executed yet due to dependence
                    # on installation of blocking packages.
                root_config = self._frozen_config.roots[task.root]
                inst_pkg = self._pkg(task.cpv, "installed", root_config,
                if self._dynamic_config.digraph.contains(inst_pkg):
                forbid_overlap = False
                heuristic_overlap = False
                for blocker in myblocker_uninstalls.parent_nodes(task):
                    if blocker.eapi in ("0", "1"):
                        heuristic_overlap = True
                    elif blocker.atom.blocker.overlap.forbid:
                        forbid_overlap = True
                if forbid_overlap and running_root == task.root:
                if heuristic_overlap and running_root == task.root:
                    # Never uninstall sys-apps/portage or it's essential
                    # dependencies, except through replacement.
                    runtime_dep_atoms = \
                        list(runtime_deps.iterAtomsForPackage(task))
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)
                    # Don't uninstall a runtime dep if it appears
                    # to be the only suitable one installed.
                    vardb = root_config.trees["vartree"].dbapi
                    for atom in runtime_dep_atoms:
                        other_version = None
                        for pkg in vardb.match_pkgs(atom):
                            if pkg.cpv == task.cpv and \
                                pkg.metadata["COUNTER"] == \
                                task.metadata["COUNTER"]:
                        if other_version is None:
                    # For packages in the system set, don't take
                    # any chances. If the conflict can't be resolved
                    # by a normal replacement operation then abort.
                    for atom in root_config.sets[
                        "system"].iterAtomsForPackage(task):
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)
                # Note that the world check isn't always
                # necessary since self._complete_graph() will
                # add all packages from the system and world sets to the
                # graph. This just allows unresolved conflicts to be
                # detected as early as possible, which makes it possible
                # to avoid calling self._complete_graph() when it is
                # unnecessary due to blockers triggering an abortion.
                # For packages in the world set, go ahead an uninstall
                # when necessary, as long as the atom will be satisfied
                # in the final state.
                graph_db = self._dynamic_config.mydbapi[task.root]
                for atom in root_config.sets[
                    "selected"].iterAtomsForPackage(task):
                    for pkg in graph_db.match_pkgs(atom):
                    self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
                except portage.exception.InvalidDependString as e:
                    portage.writemsg("!!! Invalid PROVIDE in " + \
                        "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                        (task.root, task.cpv, e), noiselevel=-1)
                # Check the deps of parent nodes to ensure that
                # the chosen task produces a leaf node. Maybe
                # this can be optimized some more to make the
                # best possible choice, but the current algorithm
                # is simple and should be near optimal for most
                mergeable_parent = False
                for parent in mygraph.parent_nodes(task):
                    parent_deps.update(mygraph.child_nodes(parent,
                        ignore_priority=priority_range.ignore_medium_soft))
                    if parent in mergeable_nodes and \
                        gather_deps(ignore_uninst_or_med_soft,
                        mergeable_nodes, set(), parent):
                        mergeable_parent = True
                if not mergeable_parent:
                parent_deps.remove(task)
                if min_parent_deps is None or \
                    len(parent_deps) < min_parent_deps:
                    min_parent_deps = len(parent_deps)

            if uninst_task is not None:
                # The uninstall is performed only after blocking
                # packages have been merged on top of it. File
                # collisions between blocking packages are detected
                # and removed from the list of files to be uninstalled.
                scheduled_uninstalls.add(uninst_task)
                parent_nodes = mygraph.parent_nodes(uninst_task)

                # Reverse the parent -> uninstall edges since we want
                # to do the uninstall after blocking packages have
                # been merged on top of it.
                mygraph.remove(uninst_task)
                for blocked_pkg in parent_nodes:
                    mygraph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)
                    scheduler_graph.remove_edge(uninst_task, blocked_pkg)
                    scheduler_graph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)

                # Sometimes a merge node will render an uninstall
                # node unnecessary (due to occupying the same SLOT),
                # and we want to avoid executing a separate uninstall
                # task in that case.
                slot_node = self._dynamic_config.mydbapi[uninst_task.root
                    ].match_pkgs(uninst_task.slot_atom)
                slot_node[0].operation == "merge":
                    mygraph.add(slot_node[0], uninst_task,
                        priority=BlockerDepPriority.instance)

                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            # Only select root nodes as a last resort. This case should
            # only trigger when the graph is nearly empty and the only
            # remaining nodes are isolated (no parents or children). Since
            # the nodes must be isolated, ignore_priority is not needed.
            selected_nodes = get_nodes()

        if not selected_nodes and not drop_satisfied:
            drop_satisfied = True

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # If possible, drop an uninstall task here in order to avoid
            # the circular deps code path. The corresponding blocker will
            # still be counted as an unresolved conflict.
            for node in myblocker_uninstalls.leaf_nodes():
                mygraph.remove(node)
                ignored_uninstall_tasks.add(node)

            if uninst_task is not None:
                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            self._dynamic_config._circular_deps_for_display = mygraph
            raise self._unknown_internal_error()

        # At this point, we've succeeded in selecting one or more nodes, so
        # reset state variables for leaf node selection.
        drop_satisfied = False

        mygraph.difference_update(selected_nodes)

        for node in selected_nodes:
            if isinstance(node, Package) and \
                node.operation == "nomerge":

            # Handle interactions between blockers
            # and uninstallation tasks.
            solved_blockers = set()
            if isinstance(node, Package) and \
                "uninstall" == node.operation:
                have_uninstall_task = True
            vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
            previous_cpv = vardb.match(node.slot_atom)
            # The package will be replaced by this one, so remove
            # the corresponding Uninstall task if necessary.
            previous_cpv = previous_cpv[0]
            ("installed", node.root, previous_cpv, "uninstall")
            mygraph.remove(uninst_task)

            if uninst_task is not None and \
                uninst_task not in ignored_uninstall_tasks and \
                myblocker_uninstalls.contains(uninst_task):
                blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
                myblocker_uninstalls.remove(uninst_task)
                # Discard any blockers that this Uninstall solves.
                for blocker in blocker_nodes:
                    if not myblocker_uninstalls.child_nodes(blocker):
                        myblocker_uninstalls.remove(blocker)
                        self._dynamic_config._unsolvable_blockers:
                        solved_blockers.add(blocker)

            retlist.append(node)

            if (isinstance(node, Package) and \
                "uninstall" == node.operation) or \
                (uninst_task is not None and \
                uninst_task in scheduled_uninstalls):
                # Include satisfied blockers in the merge list
                # since the user might be interested and also
                # it serves as an indicator that blocking packages
                # will be temporarily installed simultaneously.
                for blocker in solved_blockers:
                    blocker.satisfied = True
                    retlist.append(blocker)

    unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
    for node in myblocker_uninstalls.root_nodes():
        unsolvable_blockers.add(node)

    for blocker in unsolvable_blockers:
        retlist.append(blocker)

    # If any Uninstall tasks need to be executed in order
    # to avoid a conflict, complete the graph with any
    # dependencies that may have been initially
    # neglected (to ensure that unsafe Uninstall tasks
    # are properly identified and blocked from execution).
    if have_uninstall_task and \
        not unsolvable_blockers:
        self._dynamic_config.myparams["complete"] = True
        raise self._serialize_tasks_retry("")

    if unsolvable_blockers and \
        not self._accept_blocker_conflicts():
        self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    if self._dynamic_config._slot_collision_info and \
        not self._accept_blocker_conflicts():
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Report a circular-dependency panic to the user: shrink the graph
    down to the cycle members, display them in forced --tree mode with
    their USE flags, then print the offending graph and advice.

    NOTE(review): several statements are missing from this copy (the
    root-node pruning loop header, the "display_order = []"
    initialization, the leaf-node if/else branch bodies, and the closing
    "noiselevel=-1)" argument of one writemsg call). Restore from
    upstream before use.
    """
    # No leaf nodes are available, so we have a circular
    # dependency panic situation. Reduce the noise level to a
    # minimum via repeated elimination of root nodes since they
    # have no parents and thus can not be part of a cycle.
    root_nodes = mygraph.root_nodes(
        ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
    mygraph.difference_update(root_nodes)
    # Display the USE flags that are enabled on nodes that are part
    # of dependency cycles in case that helps the user decide to
    # disable some of them.
    tempgraph = mygraph.copy()
    while not tempgraph.empty():
        nodes = tempgraph.leaf_nodes()
        node = tempgraph.order[0]
        display_order.append(node)
        tempgraph.remove(node)
    display_order.reverse()
    # Force a verbose tree display so the cycle is readable.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts.pop("--verbose", None)
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(display_order)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)
    mygraph.debug_print()
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Note that circular dependencies " + \
        "can often be avoided by temporarily\n", noiselevel=-1)
    portage.writemsg(prefix + "disabling USE flags that trigger " + \
        "optional dependencies.\n", noiselevel=-1)
3856 def _show_merge_list(self):
3857 if self._dynamic_config._serialized_tasks_cache is not None and \
3858 not (self._dynamic_config._displayed_list and \
3859 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
3860 self._dynamic_config._displayed_list == \
3861 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
3862 display_list = self._dynamic_config._serialized_tasks_cache[:]
3863 if "--tree" in self._frozen_config.myopts:
3864 display_list.reverse()
3865 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Explain unresolved blocker conflicts to the user: show the merge
    list, then for each blocked/blocking package list the parents that
    pulled it in (pruned to keep the output readable).

    @param blockers: iterable of unsatisfied Blocker nodes.

    NOTE(review): initializations such as "conflict_pkgs = {}",
    "pruned_pkgs = set()", the second "msg = []", "indent",
    "max_parents" and "pruned_list = set()", along with several branch
    bodies ("break"/"continue"/"else"), are missing from this copy.
    Restore from upstream before use.
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    from textwrap import wrap
    portage.writemsg("\n", noiselevel=-1)
    for line in wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("world", atom)])
            conflict_pkgs[pkg] = parent_atoms

    # Reduce noise by pruning packages that are only
    # pulled in by other conflict packages.
    for pkg, parent_atoms in conflict_pkgs.items():
        relevant_parent = False
        for parent, atom in parent_atoms:
            if parent not in conflict_pkgs:
                relevant_parent = True
        if not relevant_parent:
            pruned_pkgs.add(pkg)
    for pkg in pruned_pkgs:
        del conflict_pkgs[pkg]

    # Max number of parents shown, to avoid flooding the display.
    for pkg, parent_atoms in conflict_pkgs.items():
        # Prefer packages that are not directly involved in a conflict.
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            parent, atom = parent_atom
            if parent not in conflict_pkgs:
                pruned_list.add(parent_atom)
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            pruned_list.add(parent_atom)
        omitted_parents = len(parent_atoms) - len(pruned_list)
        msg.append(indent + "%s pulled in by\n" % pkg)
        for parent_atom in pruned_list:
            parent, atom = parent_atom
            msg.append(2*indent)
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(str(parent))
            # Display the specific atom from SetArg or
            msg.append("%s required by %s" % (atom, parent))
        msg.append(2*indent)
        msg.append("(and %d more)\n" % omitted_parents)
    sys.stderr.write("".join(msg))

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
3963 def display(self, mylist, favorites=[], verbosity=None):
# Render the merge list ("emerge --pretend"-style output): one line per
# package/blocker with status letters, USE-flag changes, fetch sizes and
# repository info, honoring --columns/--tree/--quiet/--verbose.
# NOTE(review): this is an elided numbered listing (gaps in the embedded
# line numbers) — code kept verbatim, comments only.
# NOTE(review): `favorites=[]` is a mutable default argument; it is only
# read here (fed to InternalPackageSet below), but sharing one list across
# calls is a latent hazard — prefer `favorites=None` with a local default.
3965 # This is used to prevent display_problems() from
3966 # redundantly displaying this exact same merge list
3967 # again via _show_merge_list().
3968 self._dynamic_config._displayed_list = mylist
3970 if verbosity is None:
# Pre-ternary `and/or` idiom: --quiet -> 1, --verbose -> 3, default 2.
3971 verbosity = ("--quiet" in self._frozen_config.myopts and 1 or \
3972 "--verbose" in self._frozen_config.myopts and 3 or 2)
3973 favorites_set = InternalPackageSet(favorites)
3974 oneshot = "--oneshot" in self._frozen_config.myopts or \
3975 "--onlydeps" in self._frozen_config.myopts
3976 columns = "--columns" in self._frozen_config.myopts
3977 tree_display = "--tree" in self._frozen_config.myopts
3982 counters = PackageCounters()
3984 if verbosity == 1 and "--verbose" not in self._frozen_config.myopts:
# Quiet mode: stub that ignores its arguments (body elided from listing).
3985 def create_use_string(*args):
# Full USE-string builder: colorizes each flag by its transition between
# the old and new IUSE/USE sets (red=enabled, green*=newly toggled,
# yellow%=added/removed from IUSE, parenthesized=forced/masked).
3988 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3990 is_new, reinst_flags,
3991 all_flags=(verbosity == 3 or "--quiet" in self._frozen_config.myopts),
3992 alphabetical=("--alphabetical" in self._frozen_config.myopts)):
4000 cur_iuse = set(cur_iuse)
4001 enabled_flags = cur_iuse.intersection(cur_use)
4002 removed_iuse = set(old_iuse).difference(cur_iuse)
4003 any_iuse = cur_iuse.union(old_iuse)
4004 any_iuse = list(any_iuse)
4006 for flag in any_iuse:
4009 reinst_flag = reinst_flags and flag in reinst_flags
4010 if flag in enabled_flags:
4012 if is_new or flag in old_use and \
4013 (all_flags or reinst_flag):
4014 flag_str = red(flag)
4015 elif flag not in old_iuse:
4016 flag_str = yellow(flag) + "%*"
4017 elif flag not in old_use:
4018 flag_str = green(flag) + "*"
4019 elif flag in removed_iuse:
4020 if all_flags or reinst_flag:
4021 flag_str = yellow("-" + flag) + "%"
4024 flag_str = "(" + flag_str + ")"
4025 removed.append(flag_str)
4028 if is_new or flag in old_iuse and \
4029 flag not in old_use and \
4030 (all_flags or reinst_flag):
4031 flag_str = blue("-" + flag)
4032 elif flag not in old_iuse:
4033 flag_str = yellow("-" + flag)
4034 if flag not in iuse_forced:
4036 elif flag in old_use:
4037 flag_str = green("-" + flag) + "*"
4039 if flag in iuse_forced:
4040 flag_str = "(" + flag_str + ")"
4042 enabled.append(flag_str)
4044 disabled.append(flag_str)
4047 ret = " ".join(enabled)
4049 ret = " ".join(enabled + disabled + removed)
4051 ret = '%s="%s" ' % (name, ret)
# Partition the input into ordered nodes vs. unsatisfied blockers; the
# latter are appended at the end of the display list at depth 0.
4054 repo_display = RepoDisplay(self._frozen_config.roots)
4055 unsatisfied_blockers = []
4058 if isinstance(x, Blocker):
4059 counters.blocks += 1
4061 ordered_nodes.append(x)
4062 counters.blocks_satisfied += 1
4064 unsatisfied_blockers.append(x)
4066 ordered_nodes.append(x)
4069 display_list = self._tree_display(ordered_nodes)
4071 display_list = [(x, 0, True) for x in ordered_nodes]
4073 mylist = display_list
4074 for x in unsatisfied_blockers:
4075 mylist.append((x, 0, True))
4077 # files to fetch list - avoids counting a same file twice
4078 # in size display (verbose mode)
4081 # Use this set to detect when all the "repoadd" strings are "[0]"
4082 # and disable the entire repo display in this case.
# Main per-entry loop: each item is a (node, depth, ordered) triple.
4085 for mylist_index in range(len(mylist)):
4086 x, depth, ordered = mylist[mylist_index]
4090 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4091 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
4092 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4093 vartree = self._frozen_config.trees[myroot]["vartree"]
4094 pkgsettings = self._frozen_config.pkgsettings[myroot]
4097 indent = " " * depth
# Blocker entries: lower-case "b" when satisfied, upper-case "B" otherwise.
4099 if isinstance(x, Blocker):
4101 blocker_style = "PKG_BLOCKER_SATISFIED"
4102 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4104 blocker_style = "PKG_BLOCKER"
4105 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4106 resolved = portage.dep_expand(
4107 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4108 if "--columns" in self._frozen_config.myopts and "--quiet" in self._frozen_config.myopts:
4109 addl += " " + colorize(blocker_style, str(resolved))
4111 addl = "[%s %s] %s%s" % \
4112 (colorize(blocker_style, "blocks"),
4113 addl, indent, colorize(blocker_style, str(resolved)))
4114 block_parents = self._dynamic_config._blocker_parents.parent_nodes(x)
4115 block_parents = set([pnode[2] for pnode in block_parents])
4116 block_parents = ", ".join(block_parents)
4118 addl += colorize(blocker_style,
4119 " (\"%s\" is blocking %s)") % \
4120 (str(x.atom).lstrip("!"), block_parents)
4122 addl += colorize(blocker_style,
4123 " (is blocking %s)") % block_parents
4124 if isinstance(x, Blocker) and x.satisfied:
4129 blockers.append(addl)
# Package entries: derive status letters (N/NS/U/D/R), update counters.
4132 pkg_merge = ordered and pkg_status == "merge"
4133 if not pkg_merge and pkg_status == "merge":
4134 pkg_status = "nomerge"
4135 built = pkg_type != "ebuild"
4136 installed = pkg_type == "installed"
4138 metadata = pkg.metadata
4140 repo_name = metadata["repository"]
4141 if pkg.type_name == "ebuild":
4142 ebuild_path = portdb.findname(pkg.cpv)
4143 if ebuild_path is None:
4144 raise AssertionError(
4145 "ebuild not found for '%s'" % pkg.cpv)
# Repository root is three directory levels above the ebuild file
# (repo/category/package/pkg.ebuild).
4146 repo_path_real = os.path.dirname(os.path.dirname(
4147 os.path.dirname(ebuild_path)))
4149 repo_path_real = portdb.getRepositoryPath(repo_name)
4150 pkg_use = list(pkg.use.enabled)
4151 if not pkg.built and pkg.operation == 'merge' and \
4152 'fetch' in pkg.metadata.restrict:
4155 counters.restrict_fetch += 1
4156 if portdb.fetch_check(pkg_key, pkg_use):
4159 counters.restrict_fetch_satisfied += 1
4161 #we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
4162 #param is used for -u, where you still *do* want to see when something is being upgraded.
4165 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4166 if vardb.cpv_exists(pkg_key):
# Exact version already installed: "R" (reinstall/rebuild).
4167 addl=" "+yellow("R")+fetch+" "
4170 counters.reinst += 1
4171 if pkg_type == "binary":
4172 counters.binary += 1
4173 elif pkg_status == "uninstall":
4174 counters.uninst += 1
4175 # filter out old-style virtual matches
4176 elif installed_versions and \
4177 portage.cpv_getkey(installed_versions[0]) == \
4178 portage.cpv_getkey(pkg_key):
4179 myinslotlist = vardb.match(pkg.slot_atom)
4180 # If this is the first install of a new-style virtual, we
4181 # need to filter out old-style virtual matches.
4182 if myinslotlist and \
4183 portage.cpv_getkey(myinslotlist[0]) != \
4184 portage.cpv_getkey(pkg_key):
4187 myoldbest = myinslotlist[:]
4189 if not portage.dep.cpvequal(pkg_key,
4190 portage.best([pkg_key] + myoldbest)):
# Target version is lower than the installed best: downgrade "UD".
4192 addl += turquoise("U")+blue("D")
4194 counters.downgrades += 1
4195 if pkg_type == "binary":
4196 counters.binary += 1
4199 addl += turquoise("U") + " "
4201 counters.upgrades += 1
4202 if pkg_type == "binary":
4203 counters.binary += 1
4205 # New slot, mark it new.
4206 addl = " " + green("NS") + fetch + " "
4207 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4209 counters.newslot += 1
4210 if pkg_type == "binary":
4211 counters.binary += 1
4213 if "--changelog" in self._frozen_config.myopts:
4214 inst_matches = vardb.match(pkg.slot_atom)
4216 ebuild_path_cl = ebuild_path
4217 if ebuild_path_cl is None:
4219 ebuild_path_cl = portdb.findname(pkg.cpv)
4221 if ebuild_path_cl is not None:
4222 changelogs.extend(calc_changelog(
4223 ebuild_path_cl, inst_matches[0], pkg.cpv))
4225 addl = " " + green("N") + " " + fetch + " "
4228 if pkg_type == "binary":
4229 counters.binary += 1
# Compute USE-flag display: compare current IUSE/USE against the
# previously installed version's values from the vartree.
4236 forced_flags = set()
4237 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4238 forced_flags.update(pkgsettings.useforce)
4239 forced_flags.update(pkgsettings.usemask)
4241 cur_use = [flag for flag in pkg.use.enabled \
4242 if flag in pkg.iuse.all]
4243 cur_iuse = sorted(pkg.iuse.all)
4245 if myoldbest and myinslotlist:
4246 previous_cpv = myoldbest[0]
4248 previous_cpv = pkg.cpv
4249 if vardb.cpv_exists(previous_cpv):
4250 old_iuse, old_use = vardb.aux_get(
4251 previous_cpv, ["IUSE", "USE"])
4252 old_iuse = list(set(
4253 filter_iuse_defaults(old_iuse.split())))
4255 old_use = old_use.split()
4262 old_use = [flag for flag in old_use if flag in old_iuse]
4264 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4266 use_expand.reverse()
4267 use_expand_hidden = \
4268 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Group flat USE flags into their USE_EXPAND buckets (e.g. "video_cards_x"
# -> VIDEO_CARDS:"x"); leftover flags land in the "USE" bucket.
4270 def map_to_use_expand(myvals, forcedFlags=False,
4274 for exp in use_expand:
4277 for val in myvals[:]:
4278 if val.startswith(exp.lower()+"_"):
4279 if val in forced_flags:
4280 forced[exp].add(val[len(exp)+1:])
4281 ret[exp].append(val[len(exp)+1:])
4284 forced["USE"] = [val for val in myvals \
4285 if val in forced_flags]
4287 for exp in use_expand_hidden:
4293 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4294 # are the only thing that triggered reinstallation.
4295 reinst_flags_map = {}
4296 reinstall_for_flags = self._dynamic_config._reinstall_nodes.get(pkg)
4297 reinst_expand_map = None
4298 if reinstall_for_flags:
4299 reinst_flags_map = map_to_use_expand(
4300 list(reinstall_for_flags), removeHidden=False)
4301 for k in list(reinst_flags_map):
4302 if not reinst_flags_map[k]:
4303 del reinst_flags_map[k]
4304 if not reinst_flags_map.get("USE"):
4305 reinst_expand_map = reinst_flags_map.copy()
4306 reinst_expand_map.pop("USE", None)
4307 if reinst_expand_map and \
4308 not set(reinst_expand_map).difference(
4310 use_expand_hidden = \
4311 set(use_expand_hidden).difference(
4314 cur_iuse_map, iuse_forced = \
4315 map_to_use_expand(cur_iuse, forcedFlags=True)
4316 cur_use_map = map_to_use_expand(cur_use)
4317 old_iuse_map = map_to_use_expand(old_iuse)
4318 old_use_map = map_to_use_expand(old_use)
4321 use_expand.insert(0, "USE")
4323 for key in use_expand:
4324 if key in use_expand_hidden:
4326 verboseadd += create_use_string(key.upper(),
4327 cur_iuse_map[key], iuse_forced[key],
4328 cur_use_map[key], old_iuse_map[key],
4329 old_use_map[key], is_new,
4330 reinst_flags_map.get(key))
# Fetch-size accounting (verbose): sum distfile sizes, skipping files
# already counted for an earlier entry (myfetchlist dedupes).
4335 if pkg_type == "ebuild" and pkg_merge:
4337 myfilesdict = portdb.getfetchsizes(pkg_key,
4338 useflags=pkg_use, debug=self._frozen_config.edebug)
4339 except portage.exception.InvalidDependString as e:
4340 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4341 show_invalid_depstring_notice(x, src_uri, str(e))
4344 if myfilesdict is None:
4345 myfilesdict="[empty/missing/bad digest]"
4347 for myfetchfile in myfilesdict:
4348 if myfetchfile not in myfetchlist:
4349 mysize+=myfilesdict[myfetchfile]
4350 myfetchlist.append(myfetchfile)
4352 counters.totalsize += mysize
4353 verboseadd += format_size(mysize)
4356 # assign index for a previous version in the same slot
4357 has_previous = False
4358 repo_name_prev = None
4359 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4361 slot_matches = vardb.match(slot_atom)
4364 repo_name_prev = vardb.aux_get(slot_matches[0],
4367 # now use the data to generate output
4368 if pkg.installed or not has_previous:
4369 repoadd = repo_display.repoStr(repo_path_real)
4371 repo_path_prev = None
4373 repo_path_prev = portdb.getRepositoryPath(
4375 if repo_path_prev == repo_path_real:
4376 repoadd = repo_display.repoStr(repo_path_real)
4378 repoadd = "%s=>%s" % (
4379 repo_display.repoStr(repo_path_prev),
4380 repo_display.repoStr(repo_path_real))
4382 repoadd_set.add(repoadd)
4384 xs = [portage.cpv_getkey(pkg_key)] + \
4385 list(portage.catpkgsplit(pkg_key)[2:])
4392 if "COLUMNWIDTH" in self._frozen_config.settings:
4394 mywidth = int(self._frozen_config.settings["COLUMNWIDTH"])
4395 except ValueError as e:
4396 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4398 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4399 self._frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
4401 oldlp = mywidth - 30
4404 # Convert myoldbest from a list to a string.
4408 for pos, key in enumerate(myoldbest):
4409 key = portage.catpkgsplit(key)[2] + \
4410 "-" + portage.catpkgsplit(key)[3]
# Hide the implicit "-r0" revision suffix in the old-version display.
4411 if key[-3:] == "-r0":
4413 myoldbest[pos] = key
4414 myoldbest = blue("["+", ".join(myoldbest)+"]")
4417 root_config = self._frozen_config.roots[myroot]
4418 system_set = root_config.sets["system"]
4419 world_set = root_config.sets["selected"]
4424 pkg_system = system_set.findAtomForPackage(pkg)
4425 pkg_world = world_set.findAtomForPackage(pkg)
4426 if not (oneshot or pkg_world) and \
4427 myroot == self._frozen_config.target_root and \
4428 favorites_set.findAtomForPackage(pkg):
4429 # Maybe it will be added to world now.
4430 if create_world_atom(pkg, favorites_set, root_config):
4432 except portage.exception.InvalidDependString:
4433 # This is reported elsewhere if relevant.
# Pick the color class from merge status and system/world membership.
4436 def pkgprint(pkg_str):
4439 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4441 return colorize("PKG_MERGE_WORLD", pkg_str)
4443 return colorize("PKG_MERGE", pkg_str)
4444 elif pkg_status == "uninstall":
4445 return colorize("PKG_UNINSTALL", pkg_str)
4448 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4450 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4452 return colorize("PKG_NOMERGE", pkg_str)
4454 if 'interactive' in pkg.metadata.properties and \
4455 pkg.operation == 'merge':
# Interactive ebuilds replace the first status character with "I".
4456 addl = colorize("WARN", "I") + addl[1:]
4458 counters.interactive += 1
# Assemble the final line; two near-identical branches follow, one for
# cross-root entries ("to <root>") and one for the normal case.
4463 if "--columns" in self._frozen_config.myopts:
4464 if "--quiet" in self._frozen_config.myopts:
4465 myprint=addl+" "+indent+pkgprint(pkg_cp)
4466 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4467 myprint=myprint+myoldbest
4468 myprint=myprint+darkgreen("to "+x[1])
4472 myprint = "[%s] %s%s" % \
4473 (pkgprint(pkg_status.ljust(13)),
4474 indent, pkgprint(pkg.cp))
4476 myprint = "[%s %s] %s%s" % \
4477 (pkgprint(pkg.type_name), addl,
4478 indent, pkgprint(pkg.cp))
4479 if (newlp-nc_len(myprint)) > 0:
4480 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4481 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4482 if (oldlp-nc_len(myprint)) > 0:
4483 myprint=myprint+" "*(oldlp-nc_len(myprint))
4484 myprint=myprint+myoldbest
4485 myprint += darkgreen("to " + pkg.root)
4488 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4490 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4491 myprint += indent + pkgprint(pkg_key) + " " + \
4492 myoldbest + darkgreen("to " + myroot)
4494 if "--columns" in self._frozen_config.myopts:
4495 if "--quiet" in self._frozen_config.myopts:
4496 myprint=addl+" "+indent+pkgprint(pkg_cp)
4497 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4498 myprint=myprint+myoldbest
4502 myprint = "[%s] %s%s" % \
4503 (pkgprint(pkg_status.ljust(13)),
4504 indent, pkgprint(pkg.cp))
4506 myprint = "[%s %s] %s%s" % \
4507 (pkgprint(pkg.type_name), addl,
4508 indent, pkgprint(pkg.cp))
4509 if (newlp-nc_len(myprint)) > 0:
4510 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4511 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4512 if (oldlp-nc_len(myprint)) > 0:
4513 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4514 myprint += myoldbest
4517 myprint = "[%s] %s%s %s" % \
4518 (pkgprint(pkg_status.ljust(13)),
4519 indent, pkgprint(pkg.cpv),
4522 myprint = "[%s %s] %s%s %s" % \
4523 (pkgprint(pkg_type), addl, indent,
4524 pkgprint(pkg.cpv), myoldbest)
4526 if columns and pkg.operation == "uninstall":
4528 p.append((myprint, verboseadd, repoadd))
# Warn that emerge restarts itself when a new portage is in the list.
# NOTE(review): '--quiet' is tested twice in this condition (lines 4531
# and 4537) — the second test is redundant.
4530 if "--tree" not in self._frozen_config.myopts and \
4531 "--quiet" not in self._frozen_config.myopts and \
4532 not self._frozen_config._opts_no_restart.intersection(self._frozen_config.myopts) and \
4533 pkg.root == self._frozen_config._running_root.root and \
4534 portage.match_from_list(
4535 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4536 not vardb.cpv_exists(pkg.cpv) and \
4537 "--quiet" not in self._frozen_config.myopts:
4538 if mylist_index < len(mylist) - 1:
4539 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4540 p.append(colorize("WARN", " then resume the merge."))
# Emit the collected lines; repoadd_set == {"0"} means every entry came
# from the default repo, so the repo column is suppressed entirely.
4543 show_repos = repoadd_set and repoadd_set != set(["0"])
4546 if isinstance(x, basestring):
4547 out.write("%s\n" % (x,))
4550 myprint, verboseadd, repoadd = x
4553 myprint += " " + verboseadd
4555 if show_repos and repoadd:
4556 myprint += " " + teal("[%s]" % repoadd)
4558 out.write("%s\n" % (myprint,))
4567 # In python-2.x, str() can trigger a UnicodeEncodeError here,
4568 # so call __str__() directly.
4569 writemsg_stdout(repo_display.__str__(), noiselevel=-1)
4571 if "--changelog" in self._frozen_config.myopts:
4572 writemsg_stdout('\n', noiselevel=-1)
4573 for revision,text in changelogs:
4574 writemsg_stdout(bold('*'+revision) + '\n' + text,
4580 def _tree_display(self, mylist):
# Build the --tree display list: copy the dependency digraph, splice
# Uninstall/Blocker edges into it, then delegate to the ordered or
# unordered tree renderer and prune the result.
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4582 # If there are any Uninstall instances, add the
4583 # corresponding blockers to the digraph.
4584 mygraph = self._dynamic_config.digraph.copy()
4586 executed_uninstalls = set(node for node in mylist \
4587 if isinstance(node, Package) and node.operation == "unmerge")
4589 for uninstall in self._dynamic_config._blocker_uninstalls.leaf_nodes():
4590 uninstall_parents = \
4591 self._dynamic_config._blocker_uninstalls.parent_nodes(uninstall)
4592 if not uninstall_parents:
4595 # Remove the corresponding "nomerge" node and substitute
4596 # the Uninstall node.
4597 inst_pkg = self._pkg(uninstall.cpv, "installed",
4598 uninstall.root_config, installed=True)
4601 mygraph.remove(inst_pkg)
4606 inst_pkg_blockers = self._dynamic_config._blocker_parents.child_nodes(inst_pkg)
4608 inst_pkg_blockers = []
4610 # Break the Package -> Uninstall edges.
4611 mygraph.remove(uninstall)
4613 # Resolution of a package's blockers
4614 # depends on its own uninstallation.
4615 for blocker in inst_pkg_blockers:
4616 mygraph.add(uninstall, blocker)
4618 # Expand Package -> Uninstall edges into
4619 # Package -> Blocker -> Uninstall edges.
4620 for blocker in uninstall_parents:
4621 mygraph.add(uninstall, blocker)
4622 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4623 if parent != inst_pkg:
4624 mygraph.add(blocker, parent)
4626 # If the uninstall task did not need to be executed because
4627 # of an upgrade, display Blocker -> Upgrade edges since the
4628 # corresponding Blocker -> Uninstall edges will not be shown.
4630 self._dynamic_config._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
4631 if upgrade_node is not None and \
4632 uninstall not in executed_uninstalls:
4633 for blocker in uninstall_parents:
4634 mygraph.add(upgrade_node, blocker)
4636 if "--unordered-display" in self._frozen_config.myopts:
4637 display_list = self._unordered_tree_display(mygraph, mylist)
4639 display_list = self._ordered_tree_display(mygraph, mylist)
4641 self._prune_tree_display(display_list)
4645 def _unordered_tree_display(self, mygraph, mylist):
# Depth-first walk from the graph roots, emitting each Blocker/Package
# node once as an (node, depth, True) triple; seen_nodes prevents
# revisiting shared subtrees.
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4649 def print_node(node, depth):
4651 if node in seen_nodes:
4654 seen_nodes.add(node)
4656 if isinstance(node, (Blocker, Package)):
4657 display_list.append((node, depth, True))
4661 for child_node in mygraph.child_nodes(node):
4662 print_node(child_node, depth + 1)
4664 for root_node in mygraph.root_nodes():
4665 print_node(root_node, 0)
4669 def _ordered_tree_display(self, mygraph, mylist):
# Keep the merge-list order while reconstructing tree structure: for
# each node, find its depth by walking back up the current tree-node
# stack, then recursively fill in parent chains as unordered entries.
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4676 depth = len(tree_nodes)
4677 while depth and x not in \
4678 mygraph.child_nodes(tree_nodes[depth-1]):
4681 tree_nodes = tree_nodes[:depth]
4682 tree_nodes.append(x)
4683 display_list.append((x, depth, True))
4684 shown_edges.add((x, tree_nodes[depth-1]))
4686 traversed_nodes = set() # prevent endless circles
4687 traversed_nodes.add(x)
4688 def add_parents(current_node, ordered):
4690 # Do not traverse to parents if this node is
4691 # an argument or a direct member of a set that has
4692 # been specified as an argument (system or world).
4693 if current_node not in self._dynamic_config._set_nodes:
4695 parent_nodes = mygraph.parent_nodes(current_node)
4697 # This can happen sometimes for blockers.
4700 child_nodes = set(mygraph.child_nodes(current_node))
4701 selected_parent = None
4702 # First, try to avoid a direct cycle.
4703 for node in parent_nodes:
4704 if not isinstance(node, (Blocker, Package)):
4706 if node not in traversed_nodes and \
4707 node not in child_nodes:
4708 edge = (current_node, node)
4709 if edge in shown_edges:
4711 selected_parent = node
4713 if not selected_parent:
4714 # A direct cycle is unavoidable.
4715 for node in parent_nodes:
4716 if not isinstance(node, (Blocker, Package)):
4718 if node not in traversed_nodes:
4719 edge = (current_node, node)
4720 if edge in shown_edges:
4722 selected_parent = node
4725 shown_edges.add((current_node, selected_parent))
4726 traversed_nodes.add(selected_parent)
# Parents added this way are "unordered" (ordered=False) so the pruning
# pass can distinguish them from real merge-list entries.
4727 add_parents(selected_parent, False)
4728 display_list.append((current_node,
4729 len(tree_nodes), ordered))
4730 tree_nodes.append(current_node)
4732 add_parents(x, True)
4736 def _prune_tree_display(self, display_list):
# Walk the display list bottom-up, dropping consecutive duplicates and
# unordered filler entries that do not sit above a merge/uninstall node.
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4737 last_merge_depth = 0
4738 for i in range(len(display_list) - 1, -1, -1):
4739 node, depth, ordered = display_list[i]
4740 if not ordered and depth == 0 and i > 0 \
4741 and node == display_list[i-1][0] and \
4742 display_list[i-1][1] == 0:
4743 # An ordered node got a consecutive duplicate
4744 # when the tree was being filled in.
4747 if ordered and isinstance(node, Package) \
4748 and node.operation in ('merge', 'uninstall'):
4749 last_merge_depth = depth
4751 if depth >= last_merge_depth or \
4752 i < len(display_list) - 1 and \
4753 depth >= display_list[i+1][1]:
4756 def display_problems(self):
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4758 Display problems with the dependency graph such as slot collisions.
4759 This is called internally by display() to show the problems _after_
4760 the merge list where it is most likely to be seen, but if display()
4761 is not going to be called then this method should be called explicitly
4762 to ensure that the user is notified of problems with the graph.
4764 All output goes to stderr, except for unsatisfied dependencies which
4765 go to stdout for parsing by programs such as autounmask.
4768 # Note that show_masked_packages() sends its output to
4769 # stdout, and some programs such as autounmask parse the
4770 # output in cases when emerge bails out. However, when
4771 # show_masked_packages() is called for installed packages
4772 # here, the message is a warning that is more appropriate
4773 # to send to stderr, so temporarily redirect stdout to
4774 # stderr. TODO: Fix output code so there's a cleaner way
4775 # to redirect everything to stderr.
4780 sys.stdout = sys.stderr
4781 self._display_problems()
4787 # This goes to stdout for parsing by programs like autounmask.
4788 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
4789 self._show_unsatisfied_dep(*pargs, **kwargs)
4791 def _display_problems(self):
# Worker for display_problems(): reports circular deps, blocker/slot
# conflicts, missed updates, world-file problems, package.provided
# conflicts, and masked packages, mostly to stderr.
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4792 if self._dynamic_config._circular_deps_for_display is not None:
4793 self._show_circular_deps(
4794 self._dynamic_config._circular_deps_for_display)
4796 # The user is only notified of a slot conflict if
4797 # there are no unresolvable blocker conflicts.
4798 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
4799 self._show_unsatisfied_blockers(
4800 self._dynamic_config._unsatisfied_blockers_for_display)
4801 elif self._dynamic_config._slot_collision_info:
4802 self._show_slot_collision_notice()
4804 self._show_missed_update()
4806 # TODO: Add generic support for "set problem" handlers so that
4807 # the below warnings aren't special cases for world only.
4809 if self._dynamic_config._missing_args:
4810 world_problems = False
4811 if "world" in self._dynamic_config._sets:
4812 # Filter out indirect members of world (from nested sets)
4813 # since only direct members of world are desired here.
4814 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
4815 for arg, atom in self._dynamic_config._missing_args:
4816 if arg.name in ("selected", "world") and atom in world_set:
4817 world_problems = True
4821 sys.stderr.write("\n!!! Problems have been " + \
4822 "detected with your world file\n")
4823 sys.stderr.write("!!! Please run " + \
4824 green("emaint --check world")+"\n\n")
4826 if self._dynamic_config._missing_args:
4827 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4828 " Ebuilds for the following packages are either all\n")
4829 sys.stderr.write(colorize("BAD", "!!!") + \
4830 " masked or don't exist:\n")
4831 sys.stderr.write(" ".join(str(atom) for arg, atom in \
4832 self._dynamic_config._missing_args) + "\n")
# package.provided conflicts: group the offending args by the set(s)
# that pulled them in, then print remediation advice.
4834 if self._dynamic_config._pprovided_args:
4836 for arg, atom in self._dynamic_config._pprovided_args:
4837 if isinstance(arg, SetArg):
4839 arg_atom = (atom, atom)
4842 arg_atom = (arg.arg, atom)
4843 refs = arg_refs.setdefault(arg_atom, [])
4844 if parent not in refs:
4847 msg.append(bad("\nWARNING: "))
4848 if len(self._dynamic_config._pprovided_args) > 1:
4849 msg.append("Requested packages will not be " + \
4850 "merged because they are listed in\n")
4852 msg.append("A requested package will not be " + \
4853 "merged because it is listed in\n")
4854 msg.append("package.provided:\n\n")
4855 problems_sets = set()
4856 for (arg, atom), refs in arg_refs.items():
4859 problems_sets.update(refs)
4861 ref_string = ", ".join(["'%s'" % name for name in refs])
4862 ref_string = " pulled in by " + ref_string
4863 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
4865 if "selected" in problems_sets or "world" in problems_sets:
4866 msg.append("This problem can be solved in one of the following ways:\n\n")
4867 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
4868 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
4869 msg.append(" C) Remove offending entries from package.provided.\n\n")
4870 msg.append("The best course of action depends on the reason that an offending\n")
4871 msg.append("package.provided entry exists.\n\n")
4872 sys.stderr.write("".join(msg))
# Updates held back by LICENSE masking.
4874 masked_packages = []
4875 for pkg in self._dynamic_config._masked_license_updates:
4876 root_config = pkg.root_config
4877 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4878 mreasons = get_masking_status(pkg, pkgsettings, root_config)
4879 masked_packages.append((root_config, pkgsettings,
4880 pkg.cpv, pkg.metadata, mreasons))
4882 writemsg("\n" + colorize("BAD", "!!!") + \
4883 " The following updates are masked by LICENSE changes:\n",
4885 show_masked_packages(masked_packages)
4887 writemsg("\n", noiselevel=-1)
# Installed packages that are currently masked.
4889 masked_packages = []
4890 for pkg in self._dynamic_config._masked_installed:
4891 root_config = pkg.root_config
4892 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4893 mreasons = get_masking_status(pkg, pkgsettings, root_config)
4894 masked_packages.append((root_config, pkgsettings,
4895 pkg.cpv, pkg.metadata, mreasons))
4897 writemsg("\n" + colorize("BAD", "!!!") + \
4898 " The following installed packages are masked:\n",
4900 show_masked_packages(masked_packages)
4902 writemsg("\n", noiselevel=-1)
4904 def saveNomergeFavorites(self):
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4905 """Find atoms in favorites that are not in the mergelist and add them
4906 to the world file if necessary."""
# No-op for pretend/fetch-only/oneshot-style invocations, which must
# never modify the world file.
4907 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
4908 "--oneshot", "--onlydeps", "--pretend"):
4909 if x in self._frozen_config.myopts:
4911 root_config = self._frozen_config.roots[self._frozen_config.target_root]
4912 world_set = root_config.sets["selected"]
4914 world_locked = False
4915 if hasattr(world_set, "lock"):
4919 if hasattr(world_set, "load"):
4920 world_set.load() # maybe it's changed on disk
4922 args_set = self._dynamic_config._sets["args"]
4923 portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
4924 added_favorites = set()
# Collect world atoms for "nomerge" argument nodes (already-installed
# packages the user asked for).
4925 for x in self._dynamic_config._set_nodes:
4926 pkg_type, root, pkg_key, pkg_status = x
4927 if pkg_status != "nomerge":
4931 myfavkey = create_world_atom(x, args_set, root_config)
4933 if myfavkey in added_favorites:
4935 added_favorites.add(myfavkey)
4936 except portage.exception.InvalidDependString as e:
4937 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
4938 (pkg_key, str(e)), noiselevel=-1)
4939 writemsg("!!! see '%s'\n\n" % os.path.join(
4940 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (with SETPREFIX) that qualify as world
# candidates, then write everything to the selected/world file.
4943 for k in self._dynamic_config._sets:
4944 if k in ("args", "selected", "world") or \
4945 not root_config.sets[k].world_candidate:
4950 all_added.append(SETPREFIX + k)
4951 all_added.extend(added_favorites)
4954 print(">>> Recording %s in \"world\" favorites file..." % \
4955 colorize("INFORM", str(a)))
4957 world_set.update(all_added)
4962 def _loadResumeCommand(self, resume_data, skip_masked=True,
# NOTE(review): elided numbered listing — code kept verbatim, comments only.
4965 Add a resume command to the graph and validate it in the process. This
4966 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before trusting it.
4971 if not isinstance(resume_data, dict):
4974 mergelist = resume_data.get("mergelist")
4975 if not isinstance(mergelist, list):
4978 fakedb = self._dynamic_config.mydbapi
4979 trees = self._frozen_config.trees
4980 serialized_tasks = []
# Re-instantiate each serialized task as a Package and inject it into
# the fake dbapi; tasks that fail to load are skipped or reported.
4983 if not (isinstance(x, list) and len(x) == 4):
4985 pkg_type, myroot, pkg_key, action = x
4986 if pkg_type not in self.pkg_tree_map:
4988 if action != "merge":
4990 root_config = self._frozen_config.roots[myroot]
4992 pkg = self._pkg(pkg_key, pkg_type, root_config)
4993 except portage.exception.PackageNotFound:
4994 # It does not exist or it is corrupt.
4996 # TODO: log these somewhere
5000 if "merge" == pkg.operation and not pkg.visible:
5002 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
5004 self._dynamic_config._unsatisfied_deps_for_display.append(
5005 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
5007 fakedb[myroot].cpv_inject(pkg)
5008 serialized_tasks.append(pkg)
5009 self._spinner_update()
5011 if self._dynamic_config._unsatisfied_deps_for_display:
5014 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
5015 self._dynamic_config._serialized_tasks_cache = serialized_tasks
5016 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
5018 self._select_package = self._select_pkg_from_graph
5019 self._dynamic_config.myparams["selective"] = True
5020 # Always traverse deep dependencies in order to account for
5021 # potentially unsatisfied dependencies of installed packages.
5022 # This is necessary for correct --keep-going or --resume operation
5023 # in case a package from a group of circularly dependent packages
5024 # fails. In this case, a package which has recently been installed
5025 # may have an unsatisfied circular dependency (pulled in by
5026 # PDEPEND, for example). So, even though a package is already
5027 # installed, it may not have all of its dependencies satisfied, so
5028 # it may not be usable. If such a package is in the subgraph of
5029 # deep dependencies of a scheduled build, that build needs to
5030 # be cancelled. In order for this type of situation to be
5031 # recognized, deep traversal of dependencies is required.
5032 self._dynamic_config.myparams["deep"] = True
5034 favorites = resume_data.get("favorites")
5035 args_set = self._dynamic_config._sets["args"]
5036 if isinstance(favorites, list):
5037 args = self._load_favorites(favorites)
# Rebuild the dependency graph from the deserialized merge tasks.
5041 for task in serialized_tasks:
5042 if isinstance(task, Package) and \
5043 task.operation == "merge":
5044 if not self._add_pkg(task, None):
5047 # Packages for argument atoms need to be explicitly
5048 # added via _add_pkg() so that they are included in the
5049 # digraph (needed at least for --tree display).
5051 for atom in arg.set:
5052 pkg, existing_node = self._select_package(
5053 arg.root_config.root, atom)
5054 if existing_node is None and \
5056 if not self._add_pkg(pkg, Dependency(atom=atom,
5057 root=pkg.root, parent=arg)):
5060 # Allow unsatisfied deps here to avoid showing a masking
5061 # message for an unsatisfied dep that isn't necessarily
5063 if not self._create_graph(allow_unsatisfied=True):
# Keep only unsatisfied deps whose parent is a package scheduled
# for merge (deps of already-installed parents handled below).
5066 unsatisfied_deps = []
5067 for dep in self._dynamic_config._unsatisfied_deps:
5068 if not isinstance(dep.parent, Package):
5070 if dep.parent.operation == "merge":
5071 unsatisfied_deps.append(dep)
5074 # For unsatisfied deps of installed packages, only account for
5075 # them if they are in the subgraph of dependencies of a package
5076 # which is scheduled to be installed.
5077 unsatisfied_install = False
# Upward BFS/DFS over parent edges to find a scheduled merge.
5079 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
5081 node = dep_stack.pop()
5082 if not isinstance(node, Package):
5084 if node.operation == "merge":
5085 unsatisfied_install = True
5087 if node in traversed:
5090 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
5092 if unsatisfied_install:
5093 unsatisfied_deps.append(dep)
5095 if masked_tasks or unsatisfied_deps:
5096 # This probably means that a required package
5097 # was dropped via --skipfirst. It makes the
5098 # resume list invalid, so convert it to a
5099 # UnsatisfiedResumeDep exception.
5100 raise self.UnsatisfiedResumeDep(self,
5101 masked_tasks + unsatisfied_deps)
5102 self._dynamic_config._serialized_tasks_cache = None
5105 except self._unknown_internal_error:
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.

    @param favorites: favorite atoms / set names saved by a previous run
    @returns: the list of DependencyArg instances passed to _set_args()
    """
    # Resolve set atoms relative to the target root's configuration.
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    getSetAtoms = root_config.setconfig.getSetAtoms
    sets = root_config.sets
    # NOTE(review): the "for x in favorites:" loop header, the "args"
    # list initialization and several guard/continue lines appear to be
    # missing from this capture; only the captured lines follow.
    if not isinstance(x, basestring):
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
    # Strip the set prefix to obtain the bare set name.
    s = x[len(SETPREFIX):]
    if s in self._dynamic_config._sets:
    # Recursively expand sets so that containment tests in
    # self._get_parent_sets() properly match atoms in nested
    # sets (like if world contains system).
    expanded_set = InternalPackageSet(
        initial_atoms=getSetAtoms(s))
    self._dynamic_config._sets[s] = expanded_set
    args.append(SetArg(arg=x, set=expanded_set,
        root_config=root_config))
    # Non-set favorites are treated as plain atoms; invalid atoms are
    # skipped (the try/continue lines are not visible in this capture).
    except portage.exception.InvalidAtom:
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    Raised when a dependency of a resume list is not installed, for
    example when a required package has been dropped from the merge
    list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        # Keep a reference to the depgraph that detected the problem so
        # the caller (see _resume_depgraph) can inspect its state.
        self.depgraph = depgraph
        portage.exception.PortageException.__init__(self, value)
class _internal_exception(portage.exception.PortageException):
    """Base class for exceptions the depgraph uses for internal control flow."""

    def __init__(self, value=""):
        # The message defaults to empty since these exceptions are not
        # normally shown to the user.
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should already have been
    dumped to stderr; the exact reason may not be known here.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    Raised by _serialize_tasks() when it must be invoked again for
    some reason. Currently this only happens when neglected
    dependencies have to be added to the graph in order to avoid
    making a potentially unsafe decision.
    """
class _backtrack_mask(_internal_exception):
    """
    Raised by _show_unsatisfied_dep() when it is called with
    check_backtrack=True and a matching package has been masked by
    backtracking.
    """
def need_restart(self):
    # Expose the dynamic config's _need_restart flag; callers such as
    # _backtrack_depgraph() use it to decide whether graph creation
    # should be retried with an updated runtime package mask.
    return self._dynamic_config._need_restart
def get_runtime_pkg_mask(self):
    # Return a copy so callers (e.g. _backtrack_depgraph(), which feeds
    # the mask into a fresh depgraph) cannot alias internal state.
    return self._dynamic_config._runtime_pkg_mask.copy()
class _dep_check_composite_db(portage.dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    via dep_check().
    """
    # NOTE(review): a number of lines (guards, continue/return
    # statements and some assignments) appear to be missing from this
    # capture; comments below describe only what is visible.

    def __init__(self, depgraph, root):
        portage.dbapi.__init__(self)
        self._depgraph = depgraph
        # NOTE(review): the assignment of self._root (from "root") is
        # not visible in this capture, although self._root is read
        # throughout this class.
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # The two caches are populated together by match(), so they are
        # always invalidated together.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def match(self, atom):
        # Memoization: identical atom queries return the cached result.
        ret = self._match_cache.get(atom)
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
        slots.remove(pkg.metadata["SLOT"])
        # Probe the remaining slots one at a time via slot atoms.
        slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
        self._cpv_pkg_map[pkg.cpv] = pkg
        self._cpv_sort_ascending(ret)
        # NOTE(review): orig_atom is referenced here but its assignment
        # (presumably the atom before _dep_expand) is not visible.
        self._match_cache[orig_atom] = ret

    def _visible(self, pkg):
        # Decide whether pkg may be offered to dep_check(); several
        # guard/return lines are not visible in this capture.
        if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
            arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
        except (StopIteration, portage.exception.InvalidDependString):
        if pkg.installed and not pkg.visible:
        in_graph = self._depgraph._dynamic_config._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            # conflicts).
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            # Note: highest_visible is not necessarily the real highest
            # visible, especially when --update is not enabled, so use
            # < operator instead of !=.
            if pkg < highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        an ambiguity error.
        """
        root_config = self._depgraph.roots[self._root]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not portage.dep_getkey(x).startswith("virtual/"):
                    non_virtual_atoms.append(x)
            # Prefer the single non-virtual candidate when one exists.
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
        if len(expanded_atoms) > 1:
            # compatible with portage.cpv_expand()
            raise portage.exception.AmbiguousPackageName(
                [portage.dep_getkey(x) for x in expanded_atoms])
        atom = expanded_atoms[0]
        # Qualify the atom with a placeholder "null" category, then
        # check whether the bare package name is a PROVIDE virtual.
        null_atom = Atom(insert_category_into_atom(atom, "null"))
        cat, atom_pn = portage.catsplit(null_atom.cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the resolver to choose which virtual.
        atom = Atom(null_atom.replace('null/', 'virtual/', 1))
        # NOTE(review): the surrounding guards and the final return of
        # "atom" are not visible in this capture.

    def aux_get(self, cpv, wants):
        # Serve metadata for a cpv previously returned by match();
        # missing keys yield empty strings.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]

    def match_pkgs(self, atom):
        # Like match(), but return Package instances instead of cpvs.
        return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """
    Inform the user that the short ebuild name "arg" matched several
    fully-qualified names (atoms): list the candidates and, in
    non-quiet mode, run a search for each of them.
    """
    if "--quiet" in myopts:
        print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
        print("!!! one of the following fully-qualified ebuild names instead:\n")
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print(" " + colorize("INFORM", cp))
        # NOTE(review): the early return ending the quiet branch is not
        # visible in this capture.
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    # Derive the bare package name (sans category) as the search key.
    # NOTE(review): the continuation of the insert_category_into_atom()
    # call is not visible in this capture.
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    # NOTE(review): the loop body (per-candidate search output) is not
    # visible in this capture.
    print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
    print("!!! one of the above fully-qualified ebuild names instead.\n")
def insert_category_into_atom(atom, category):
    """
    Insert "category/" in front of the package-name part of an atom
    string, preserving any leading operator characters (e.g. ">=").

    @param atom: an atom string, possibly lacking a category
    @param category: the category name to insert
    @returns: the qualified atom string, or None when the atom contains
        no word character to anchor the insertion
    """
    alphanum = re.search(r'\w', atom)
    if alphanum is None:
        # No package name found (e.g. an operator-only string); as
        # captured, the unguarded alphanum.start() would raise
        # AttributeError here, so return None for the caller to handle.
        return None
    return atom[:alphanum.start()] + "%s/" % category + \
        atom[alphanum.start():]
def _spinner_start(spinner, myopts):
    """
    Print the "These are the packages that would be ..." header
    appropriate for the given options and start the "Calculating
    dependencies" spinner output (suppressed for --quiet/--nodeps).
    """
    # NOTE(review): the initial "spinner is None" guard and the
    # assignments of the local "action" string are not visible in this
    # capture.
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
        elif "--buildpkgonly" in myopts:
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            if "--unordered-display" in myopts:
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be %s:" % action) + "\n\n")
            # NOTE(review): the else introducing the reverse-order
            # branch is not visible in this capture.
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be %s, in reverse order:" % action) + "\n\n")
        # Non-tree display lists packages in forward order.
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be %s, in order:" % action) + "\n\n")
    # Suppress spinner updates when --quiet or --nodeps is in effect.
    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet
    portage.writemsg_stdout("Calculating dependencies ")
5402 def _spinner_stop(spinner):
5403 if spinner is None or \
5404 spinner.update is spinner.update_quiet:
5407 portage.writemsg_stdout("\b\b... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
    myaction, myfiles, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.
    """
    _spinner_start(spinner, myopts)
    try:
        return _backtrack_depgraph(settings, trees, myopts, myparams,
            myaction, myfiles, spinner)
    finally:
        # Always stop the spinner, even when graph creation raises
        # (as captured, the stop call was not protected by a finally).
        _spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams,
    myaction, myfiles, spinner):
    """
    Build a depgraph, retrying with an updated runtime package mask
    whenever the graph signals that a restart is needed, up to
    --backtrack tries (default 5).

    @returns: (success, depgraph, favorites)
    """
    backtrack_max = myopts.get('--backtrack', 5)
    runtime_pkg_mask = None
    allow_backtracking = backtrack_max > 0
    # NOTE(review): the continuation of this call, the "backtracked"
    # counter initialization/increment and the enclosing retry-loop
    # header are not visible in this capture.
    frozen_config = _frozen_depgraph_config(settings, trees,
    mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
        frozen_config=frozen_config,
        allow_backtracking=allow_backtracking,
        runtime_pkg_mask=runtime_pkg_mask)
    success, favorites = mydepgraph.select_files(myfiles)
    if mydepgraph.need_restart() and backtracked < backtrack_max:
        # Retry with the mask produced by the failed attempt.
        runtime_pkg_mask = mydepgraph.get_runtime_pkg_mask()
    elif backtracked and allow_backtracking:
        if "--debug" in myopts:
            # NOTE(review): the opening writemsg_level( line of this
            # call is not visible in this capture.
            "\n\nbacktracking aborted after %s tries\n\n" % \
            backtracked, noiselevel=-1, level=logging.DEBUG)
        # Backtracking failed, so disable it and do
        # a plain dep calculation + error message.
        allow_backtracking = False
        runtime_pkg_mask = None
    return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.
    """
    _spinner_start(spinner, myopts)
    try:
        return _resume_depgraph(settings, trees, mtimedb, myopts,
            myparams, spinner)
    finally:
        # Always stop the spinner, even when graph creation raises
        # (as captured, the stop call was not protected by a finally).
        _spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
    TODO: Return reasons for dropped_tasks, for display/logging.

    @returns: (success, depgraph, dropped_tasks)
    """
    # NOTE(review): the enclosing retry loop, the "skip_masked" flag
    # assignment, the try: around _loadResumeCommand and several
    # guard/continue lines are not visible in this capture.
    skip_unsatisfied = True
    mergelist = mtimedb["resume"]["mergelist"]
    dropped_tasks = set()
    frozen_config = _frozen_depgraph_config(settings, trees,
    mydepgraph = depgraph(settings, trees,
        myopts, myparams, spinner, frozen_config=frozen_config)
    success = mydepgraph._loadResumeCommand(mtimedb["resume"],
        skip_masked=skip_masked)
    except depgraph.UnsatisfiedResumeDep as e:
        if not skip_unsatisfied:
        graph = mydepgraph._dynamic_config.digraph
        # Map each parent of an unsatisfied dep to itself, so the dict
        # doubles as a membership test and a seed for the stack below.
        unsatisfied_parents = dict((dep.parent, dep.parent) \
        traversed_nodes = set()
        unsatisfied_stack = list(unsatisfied_parents)
        while unsatisfied_stack:
            pkg = unsatisfied_stack.pop()
            if pkg in traversed_nodes:
            traversed_nodes.add(pkg)

            # If this package was pulled in by a parent
            # package scheduled for merge, removing this
            # package may cause the the parent package's
            # dependency to become unsatisfied.
            for parent_node in graph.parent_nodes(pkg):
                if not isinstance(parent_node, Package) \
                    or parent_node.operation not in ("merge", "nomerge"):
                # NOTE(review): the "unsatisfied =" assignment opening
                # this call is not visible in this capture.
                    graph.child_nodes(parent_node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
                if pkg in unsatisfied:
                    unsatisfied_parents[parent_node] = parent_node
                    unsatisfied_stack.append(parent_node)

        # Drop every mergelist entry whose task is now unsatisfied.
        # NOTE(review): the "for x in mergelist:" loop header is not
        # visible in this capture.
        pruned_mergelist = []
        if isinstance(x, list) and \
            tuple(x) not in unsatisfied_parents:
            pruned_mergelist.append(x)

        # If the mergelist doesn't shrink then this loop is infinite.
        if len(pruned_mergelist) == len(mergelist):
            # This happens if a package can't be dropped because
            # it's already installed, but it has unsatisfied PDEPEND.
        mergelist[:] = pruned_mergelist

        # Exclude installed packages that have been removed from the graph due
        # to failure to build/install runtime dependencies after the dependent
        # package has already been installed.
        dropped_tasks.update(pkg for pkg in \
            unsatisfied_parents if pkg.operation != "nomerge")
        mydepgraph.break_refs(unsatisfied_parents)

        del e, graph, traversed_nodes, \
            unsatisfied_parents, unsatisfied_stack
    return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Collect the metadata and masking reasons for a single cpv from the
    given db.

    @returns: (metadata, mreasons) where metadata may be None when the
        db entry is unreadable, and mreasons is a list of
        human-readable masking reasons
    """
    # NOTE(review): the try/except around aux_get (which presumably
    # sets metadata to None on failure) and the else: lines pairing the
    # if-blocks below are not visible in this capture.
    metadata = dict(zip(db_keys,
        db.aux_get(cpv, db_keys)))
    if metadata is None:
        # Unreadable entry; report it as corruption.
        mreasons = ["corruption"]
    eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        mreasons = ['EAPI %s' % eapi]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package (once per cpv) together with its masking
    reasons, the relevant package.mask comment and the locations of
    missing license files.

    @param masked_packages: iterable of (root_config, pkgsettings,
        cpv, metadata, mreasons) tuples
    @returns: True if any package was masked due to an unsupported EAPI
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    # NOTE(review): the "shown_cpvs" initialization and several
    # guard/continue lines are not visible in this capture.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            # Look up the package.mask comment and its file location.
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        # NOTE(review): the try: wrapping this call and its argument
        # continuation are not visible in this capture.
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print("- "+cpv+" (masked by: "+", ".join(mreasons)+")")
        if comment and comment not in shown_comments:
            writemsg_stdout(filename + ":\n" + comment + "\n",
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
def show_mask_docs():
    """Print pointers to the documentation about masked packages."""
    for line in (
        "For more information, see the MASKED PACKAGES section in the emerge",
        "man page or refer to the Gentoo Handbook.",
    ):
        print(line)
def filter_iuse_defaults(iuse):
    """
    Yield each IUSE token with any leading "+" or "-" default marker
    stripped, leaving unmarked flags untouched.

    @param iuse: iterable of IUSE tokens (e.g. ["+foo", "-bar", "baz"])
    @rtype: generator
    """
    for flag in iuse:
        if flag.startswith(("+", "-")):
            # Strip the single-character default-state indicator.
            yield flag[1:]
        else:
            yield flag
def show_blocker_docs_link():
    """Print a pointer to the handbook section about blocked packages."""
    # NOTE(review): one or more lines between these prints (likely
    # blank-line output) are not visible in this capture.
    print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
    print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
    print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
5636 def get_masking_status(pkg, pkgsettings, root_config):
5638 mreasons = portage.getmaskingstatus(
5639 pkg, settings=pkgsettings,
5640 portdb=root_config.trees["porttree"].dbapi)
5642 if not pkg.installed:
5643 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5644 mreasons.append("CHOST: %s" % \
5645 pkg.metadata["CHOST"])
5647 for msg_type, msgs in pkg.invalid.items():
5649 mreasons.append("invalid: %s" % (msg,))
5651 if not pkg.metadata["SLOT"]:
5652 mreasons.append("invalid: SLOT is undefined")