1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
12 from itertools import chain
15 from portage import os, OrderedDict
16 from portage import _unicode_decode
17 from portage.const import PORTAGE_PACKAGE_ATOM
18 from portage.dbapi import dbapi
19 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use
20 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
21 from portage.exception import InvalidAtom
22 from portage.output import colorize, create_color_func, \
24 bad = create_color_func("BAD")
25 from portage.package.ebuild.getmaskingstatus import \
26 _getmaskingstatus, _MaskReason
27 from portage._sets import SETPREFIX
28 from portage._sets.base import InternalPackageSet
29 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
30 from portage.util import writemsg_level
31 from portage.util.digraph import digraph
33 from _emerge.AtomArg import AtomArg
34 from _emerge.Blocker import Blocker
35 from _emerge.BlockerCache import BlockerCache
36 from _emerge.BlockerDepPriority import BlockerDepPriority
37 from _emerge.countdown import countdown
38 from _emerge.create_world_atom import create_world_atom
39 from _emerge.Dependency import Dependency
40 from _emerge.DependencyArg import DependencyArg
41 from _emerge.DepPriority import DepPriority
42 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
43 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
44 from _emerge.FakeVartree import FakeVartree
45 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
46 from _emerge.is_valid_package_atom import is_valid_package_atom
47 from _emerge.Package import Package
48 from _emerge.PackageArg import PackageArg
49 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
50 from _emerge.RootConfig import RootConfig
51 from _emerge.search import search
52 from _emerge.SetArg import SetArg
53 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
54 from _emerge.UnmergeDepPriority import UnmergeDepPriority
55 from _emerge.UseFlagDisplay import pkg_use_display
57 from _emerge.resolver.slot_collision import slot_conflict_handler
58 from _emerge.resolver.circular_dependency import circular_dependency_handler
59 from _emerge.resolver.output import display
61 if sys.hexversion >= 0x3000000:
65 class _scheduler_graph_config(object):
# Simple value container handed from the depgraph to the Scheduler:
# bundles the (fake-vartree) trees, the shared Package cache, the
# dependency digraph, and the serialized merge list.
# NOTE(review): this paste is missing original lines 67 and 69
# (presumably "self.trees = trees" and "self.graph = graph") --
# restore from upstream before relying on this text.
66 def __init__(self, trees, pkg_cache, graph, mergelist):
68 self.pkg_cache = pkg_cache
70 self.mergelist = mergelist
72 class _frozen_depgraph_config(object):
# Immutable per-invocation state shared by all (possibly backtracking)
# depgraph instances: settings, shadow trees, command-line options and
# the progress spinner. Built once; never mutated during resolution.
# NOTE(review): mangled paste -- many original lines are missing from
# this span (e.g. 73, 77-78, 80, 85-86, 88, 90, 92, 98, 108, 110, 113,
# 118, including the per-root loop header that binds "myroot");
# restore from upstream before relying on this text.
74 def __init__(self, settings, trees, myopts, spinner):
75 self.settings = settings
76 self.target_root = settings["ROOT"]
# PORTAGE_DEBUG=1 in the environment forces debug behavior
# (the assignment under this condition is missing from the paste).
79 if settings.get("PORTAGE_DEBUG", "") == "1":
81 self.spinner = spinner
82 self._running_root = trees["/"]["root_config"]
# Options under which a new portage version does not require
# restarting emerge after portage itself is merged.
83 self._opts_no_restart = frozenset(["--buildpkgonly",
84 "--fetchonly", "--fetch-all-uri", "--pretend"])
# Keep a reference to the unmodified trees; the graph works on
# shadow copies that substitute a FakeVartree for the real vartree.
87 self._trees_orig = trees
89 # All Package instances
91 self._highest_license_masked = {}
93 self.trees[myroot] = {}
94 # Create a RootConfig instance that references
95 # the FakeVartree instead of the real one.
96 self.roots[myroot] = RootConfig(
97 trees[myroot]["vartree"].settings,
99 trees[myroot]["root_config"].setconfig)
100 for tree in ("porttree", "bintree"):
101 self.trees[myroot][tree] = trees[myroot][tree]
# The FakeVartree models vdb state without mutating the real
# vartree, and shares the global Package cache.
102 self.trees[myroot]["vartree"] = \
103 FakeVartree(trees[myroot]["root_config"],
104 pkg_cache=self._pkg_cache,
105 pkg_root_config=self.roots[myroot])
106 self.pkgsettings[myroot] = portage.config(
107 clone=self.trees[myroot]["vartree"].settings)
109 self._required_set_names = set(["world"])
# --exclude accepts wildcard atoms; a bare name that fails to
# parse as an atom is retried as a category wildcard ("*/name").
# NOTE(review): the "try:" line (original 113) is missing here.
111 self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
112 for x in ' '.join(myopts.get("--exclude", [])).split():
114 x = Atom(x, allow_wildcard=True)
115 except portage.exception.InvalidAtom:
116 x = Atom("*/" + x, allow_wildcard=True)
117 self.excluded_pkgs.add(x)
119 class _depgraph_sets(object):
# Per-root bookkeeping of package sets and argument atoms pulled into
# the graph. The special '__non_set_args__' set collects plain atoms
# given on the command line so they can be treated uniformly with sets.
# NOTE(review): the "def __init__(self):" line (original 120) and
# line 122 are missing from this paste -- restore from upstream.
121 # contains all sets added to the graph
123 # contains non-set atoms given as arguments
124 self.sets['__non_set_args__'] = InternalPackageSet()
125 # contains all atoms from all sets added to the graph, including
126 # atoms given as arguments
127 self.atoms = InternalPackageSet()
# Maps each atom back to the DependencyArg instances that supplied it.
128 self.atom_arg_map = {}
130 class _dynamic_depgraph_config(object):
# Mutable per-resolution state. A fresh instance is created for every
# backtracking attempt, seeded with the accumulated masks/changes
# (runtime_pkg_mask, needed_* collections) from previous attempts.
# NOTE(review): mangled paste -- numerous original lines are missing
# throughout this __init__ (e.g. 131, 141, 146, 155, 194, 200, 203,
# 206, 209, 211, 214, 216, 219, 223, 225, 231, 239, 241-242, 251-252,
# 256, 266, 276-277, 285, 290, 295); restore from upstream.
132 def __init__(self, depgraph, myparams, allow_backtracking,
133 runtime_pkg_mask, needed_unstable_keywords, needed_use_config_changes, needed_license_changes):
134 self.myparams = myparams.copy()
135 self._vdb_loaded = False
136 self._allow_backtracking = allow_backtracking
137 # Maps slot atom to package for each Package added to the graph.
138 self._slot_pkg_map = {}
139 # Maps nodes to the reasons they were selected for reinstallation.
140 self._reinstall_nodes = {}
142 # Contains a filtered view of preferred packages that are selected
143 # from available repositories.
144 self._filtered_trees = {}
145 # Contains installed packages and new packages that have been added
147 self._graph_trees = {}
148 # Caches visible packages returned from _select_package, for use in
149 # depgraph._iter_atoms_for_pkg() SLOT logic.
150 self._visible_pkgs = {}
151 #contains the args created by select_files
152 self._initial_arg_list = []
153 self.digraph = portage.digraph()
154 # manages sets added to the graph
156 # contains all nodes pulled in by self.sets
157 self._set_nodes = set()
158 # Contains only Blocker -> Uninstall edges
159 self._blocker_uninstalls = digraph()
160 # Contains only Package -> Blocker edges
161 self._blocker_parents = digraph()
162 # Contains only irrelevant Package -> Blocker edges
163 self._irrelevant_blockers = digraph()
164 # Contains only unsolvable Package -> Blocker edges
165 self._unsolvable_blockers = digraph()
166 # Contains all Blocker -> Blocked Package edges
167 self._blocked_pkgs = digraph()
168 # Contains world packages that have been protected from
169 # uninstallation but may not have been added to the graph
170 # if the graph is not complete yet.
171 self._blocked_world_pkgs = {}
172 # Contains packages whose dependencies have been traversed.
173 # This use used to check if we have accounted for blockers
174 # relevant to a package.
175 self._traversed_pkg_deps = set()
176 self._slot_collision_info = {}
177 # Slot collision nodes are not allowed to block other packages since
178 # blocker validation is only able to account for one package per slot.
179 self._slot_collision_nodes = set()
# parent-atom maps drive both conflict explanation and backtracking.
180 self._parent_atoms = {}
181 self._slot_conflict_parent_atoms = set()
182 self._slot_conflict_handler = None
183 self._circular_dependency_handler = None
184 self._serialized_tasks_cache = None
185 self._scheduler_graph = None
186 self._displayed_list = None
187 self._pprovided_args = []
188 self._missing_args = []
189 self._masked_installed = set()
190 self._masked_license_updates = set()
191 self._unsatisfied_deps_for_display = []
192 self._unsatisfied_blockers_for_display = None
193 self._circular_deps_for_display = None
195 self._dep_disjunctive_stack = []
196 self._unsatisfied_deps = []
197 self._initially_unsatisfied_deps = []
198 self._ignored_deps = []
199 self._highest_pkg_cache = {}
# Deep-copy the state carried over from previous backtracking
# attempts so this attempt can mutate it independently.
201 if runtime_pkg_mask is None:
202 runtime_pkg_mask = {}
204 runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
205 runtime_pkg_mask.items())
207 if needed_unstable_keywords is None:
208 self._needed_unstable_keywords = set()
210 self._needed_unstable_keywords = needed_unstable_keywords.copy()
212 if needed_license_changes is None:
213 self._needed_license_changes = {}
215 self._needed_license_changes = needed_license_changes.copy()
217 if needed_use_config_changes is None:
218 self._needed_use_config_changes = {}
220 self._needed_use_config_changes = \
221 dict((k.copy(), (v[0].copy(), v[1].copy())) for (k, v) in \
222 needed_use_config_changes.items())
223 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
226 self._runtime_pkg_mask = runtime_pkg_mask
227 self._need_restart = False
228 # For conditions that always require user intervention, such as
229 # unsatisfied REQUIRED_USE (currently has no autounmask support).
230 self._skip_restart = False
# Build per-root shadow dbapi structures used by dep_check().
232 for myroot in depgraph._frozen_config.trees:
233 self.sets[myroot] = _depgraph_sets()
234 self._slot_pkg_map[myroot] = {}
235 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
236 # This dbapi instance will model the state that the vdb will
237 # have after new packages have been installed.
238 fakedb = PackageVirtualDbapi(vardb.settings)
240 self.mydbapi[myroot] = fakedb
# NOTE(review): creation of "graph_tree" (original 241-242) is
# missing from this paste.
243 graph_tree.dbapi = fakedb
244 self._graph_trees[myroot] = {}
245 self._filtered_trees[myroot] = {}
246 # Substitute the graph tree for the vartree in dep_check() since we
247 # want atom selections to be consistent with package selections
248 # have already been made.
249 self._graph_trees[myroot]["porttree"] = graph_tree
250 self._graph_trees[myroot]["vartree"] = graph_tree
# NOTE(review): creation of "filtered_tree" (original 251-252) is
# missing from this paste.
253 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
254 self._filtered_trees[myroot]["porttree"] = filtered_tree
255 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
257 # Passing in graph_tree as the vartree here could lead to better
258 # atom selections in some cases by causing atoms for packages that
259 # have been added to the graph to be preferred over other choices.
260 # However, it can trigger atom selections that result in
261 # unresolvable direct circular dependencies. For example, this
262 # happens with gwydion-dylan which depends on either itself or
263 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
264 # gwydion-dylan-bin needs to be selected in order to avoid a
265 # an unresolvable direct circular dependency.
267 # To solve the problem described above, pass in "graph_db" so that
268 # packages that have been added to the graph are distinguishable
269 # from other available packages and installed packages. Also, pass
270 # the parent package into self._select_atoms() calls so that
271 # unresolvable direct circular dependencies can be detected and
272 # avoided when possible.
273 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
274 self._filtered_trees[myroot]["vartree"] = \
275 depgraph._frozen_config.trees[myroot]["vartree"]
# Assemble the ordered list of databases consulted for package
# selection; each entry is the tuple documented below.
# NOTE(review): the "dbs = []" initializer (presumably original
# 276-277) is missing from this paste.
278 # (db, pkg_type, built, installed, db_keys)
279 if "remove" in self.myparams:
280 # For removal operations, use _dep_check_composite_db
281 # for availability and visibility checks. This provides
282 # consistency with install operations, so we don't
283 # get install/uninstall cycles like in bug #332719.
284 self._graph_trees[myroot]["porttree"] = filtered_tree
286 if "--usepkgonly" not in depgraph._frozen_config.myopts:
287 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
288 db_keys = list(portdb._aux_cache_keys)
289 dbs.append((portdb, "ebuild", False, False, db_keys))
291 if "--usepkg" in depgraph._frozen_config.myopts:
292 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
293 db_keys = list(bindb._aux_cache_keys)
294 dbs.append((bindb, "binary", True, False, db_keys))
# The installed-package db is always consulted last.
296 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
297 db_keys = list(depgraph._frozen_config._trees_orig[myroot
298 ]["vartree"].dbapi._aux_cache_keys)
299 dbs.append((vardb, "installed", True, True, db_keys))
300 self._filtered_trees[myroot]["dbs"] = dbs
302 class depgraph(object):
# Core dependency-graph builder for emerge: resolves arguments into a
# digraph of Package/Blocker nodes and a serialized merge list.
304 pkg_tree_map = RootConfig.pkg_tree_map
# Dependency variables evaluated for each package's metadata.
# NOTE(review): original lines 303, 305 and 307 are missing from
# this paste (upstream also derives a runtime-keys subset here).
306 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Build a depgraph, creating the frozen (shared) config on demand and a
# fresh dynamic config seeded with backtracking state from prior runs.
# NOTE(review): original lines 313 and 318 are missing from this paste
# (313 presumably completes the _frozen_depgraph_config(...) call with
# "myopts, spinner)").
308 def __init__(self, settings, trees, myopts, myparams, spinner,
309 frozen_config=None, runtime_pkg_mask=None, needed_unstable_keywords=None, \
310 needed_use_config_changes=None, needed_license_changes=None, allow_backtracking=False):
311 if frozen_config is None:
312 frozen_config = _frozen_depgraph_config(settings, trees,
314 self._frozen_config = frozen_config
315 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
316 allow_backtracking, runtime_pkg_mask, needed_unstable_keywords, \
317 needed_use_config_changes, needed_license_changes)
# Indirection points: selection strategy can be swapped (e.g. by
# _minimize_packages) without touching call sites.
319 self._select_atoms = self._select_atoms_highest_available
320 self._select_package = self._select_pkg_highest_available
# NOTE(review): the def line and docstring delimiters for this method
# (original lines 321-323, presumably "def _load_vdb(self):") are
# missing from this paste -- lines 324-327 below are its docstring body.
324 Load installed package metadata if appropriate. This used to be called
325 from the constructor, but that wasn't very nice since this procedure
326 is slow and it generates spinner output. So, now it's called on-demand
327 by various methods when necessary.
# Idempotent: bail out if the vdb was already loaded (the early
# "return" under this condition is missing from the paste).
330 if self._dynamic_config._vdb_loaded:
333 for myroot in self._frozen_config.trees:
# With --nodeps there is no reason to prime the fake vdb.
335 preload_installed_pkgs = \
336 "--nodeps" not in self._frozen_config.myopts
338 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
339 if not fake_vartree.dbapi:
340 # This needs to be called for the first depgraph, but not for
341 # backtracking depgraphs that share the same frozen_config.
344 # FakeVartree.sync() populates virtuals, and we want
345 # self.pkgsettings to have them populated too.
346 self._frozen_config.pkgsettings[myroot] = \
347 portage.config(clone=fake_vartree.settings)
349 if preload_installed_pkgs:
350 vardb = fake_vartree.dbapi
351 fakedb = self._dynamic_config._graph_trees[
352 myroot]["vartree"].dbapi
# NOTE(review): the loop header over installed packages
# (original 353-354, presumably "for pkg in vardb:") is
# missing from this paste.
355 self._spinner_update()
356 # This triggers metadata updates via FakeVartree.
357 vardb.aux_get(pkg.cpv, [])
358 fakedb.cpv_inject(pkg)
360 # Now that the vardb state is cached in our FakeVartree,
361 # we won't be needing the real vartree cache for awhile.
362 # To make some room on the heap, clear the vardbapi
364 self._frozen_config._trees_orig[myroot
365 ]["vartree"].dbapi._clear_cache()
368 self._dynamic_config._vdb_loaded = True
370 def _spinner_update(self):
371 if self._frozen_config.spinner:
372 self._frozen_config.spinner.update()
# Report updates that were skipped because a runtime package mask was
# applied during backtracking; delegates per-category display to the
# two _show_missed_update_* helpers below.
# NOTE(review): several original lines are missing from this paste
# (e.g. 375, 378, 381, 384, 388-389, 391-392, 394-395, 397-398, 403,
# 408, 411, 414), including the "missed_updates = {}" initializer and
# the installed-package / version-comparison guards.
374 def _show_missed_update(self):
376 # In order to minimize noise, show only the highest
377 # missed update from each SLOT.
379 for pkg, mask_reasons in \
380 self._dynamic_config._runtime_pkg_mask.items():
382 # Exclude installed here since we only
383 # want to show available updates.
# Key by (root, slot) so only one candidate per slot is kept.
385 k = (pkg.root, pkg.slot_atom)
386 if k in missed_updates:
387 other_pkg, mask_type, parent_atoms = missed_updates[k]
390 for mask_type, parent_atoms in mask_reasons.items():
393 missed_updates[k] = (pkg, mask_type, parent_atoms)
396 if not missed_updates:
# Group the survivors by the kind of mask that caused the miss.
399 missed_update_types = {}
400 for pkg, mask_type, parent_atoms in missed_updates.values():
401 missed_update_types.setdefault(mask_type,
402 []).append((pkg, parent_atoms))
# Under --quiet (without --debug) suppress both noisy categories.
404 if '--quiet' in self._frozen_config.myopts and \
405 '--debug' not in self._frozen_config.myopts:
406 missed_update_types.pop("slot conflict", None)
407 missed_update_types.pop("missing dependency", None)
409 self._show_missed_update_slot_conflicts(
410 missed_update_types.get("slot conflict"))
412 self._show_missed_update_unsatisfied_dep(
413 missed_update_types.get("missing dependency"))
# Display updates skipped due to unsatisfied dependencies. Updates whose
# unsatisfied deps were masked by backtracking are collected and shown
# in abbreviated form to avoid terminal flooding (bug #285832).
# NOTE(review): several original lines are missing from this paste
# (e.g. 416, 418-419, 421, 423-424, 431-432, 435, 437, 440, 444-445,
# 453, 456), including the "try:" that pairs with the except on
# original line 428 and the root-count guards around the
# " for %s" output.
415 def _show_missed_update_unsatisfied_dep(self, missed_updates):
417 if not missed_updates:
420 backtrack_masked = []
# First pass: probe each unsatisfied dep with check_backtrack=True;
# deps masked by backtracking raise _backtrack_mask and are deferred.
422 for pkg, parent_atoms in missed_updates:
425 for parent, root, atom in parent_atoms:
426 self._show_unsatisfied_dep(root, atom, myparent=parent,
427 check_backtrack=True)
428 except self._backtrack_mask:
429 # This is displayed below in abbreviated form.
430 backtrack_masked.append((pkg, parent_atoms))
433 writemsg("\n!!! The following update has been skipped " + \
434 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
436 writemsg(str(pkg.slot_atom), noiselevel=-1)
438 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
439 writemsg("\n", noiselevel=-1)
# Full explanation for deps not masked by backtracking.
441 for parent, root, atom in parent_atoms:
442 self._show_unsatisfied_dep(root, atom, myparent=parent)
443 writemsg("\n", noiselevel=-1)
446 # These are shown in abbreviated form, in order to avoid terminal
447 # flooding from mask messages as reported in bug #285832.
448 writemsg("\n!!! The following update(s) have been skipped " + \
449 "due to unsatisfied dependencies\n" + \
450 "!!! triggered by backtracking:\n\n", noiselevel=-1)
451 for pkg, parent_atoms in backtrack_masked:
452 writemsg(str(pkg.slot_atom), noiselevel=-1)
454 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
455 writemsg("\n", noiselevel=-1)
# Display updates skipped because of slot-conflict masks, listing for
# each skipped slot the parent packages/atoms that conflicted with it.
# NOTE(review): several original lines are missing from this paste
# (e.g. 458, 460-462, 465-466, 469, 471-472, 474-476, 478, 484, 486,
# 488-490), including the "msg = []" initializer and the AtomArg/
# SetArg branching around the "conflicts with" text.
457 def _show_missed_update_slot_conflicts(self, missed_updates):
459 if not missed_updates:
463 msg.append("\n!!! One or more updates have been skipped due to " + \
464 "a dependency conflict:\n\n")
467 for pkg, parent_atoms in missed_updates:
468 msg.append(str(pkg.slot_atom))
470 msg.append(" for %s" % (pkg.root,))
473 for parent, atom in parent_atoms:
477 msg.append(" conflicts with\n")
479 if isinstance(parent,
480 (PackageArg, AtomArg)):
481 # For PackageArg and AtomArg types, it's
482 # redundant to display the atom attribute.
483 msg.append(str(parent))
485 # Display the specific atom from SetArg or
487 msg.append("%s required by %s" % (atom, parent))
491 writemsg("".join(msg), noiselevel=-1)
493 def _show_slot_collision_notice(self):
494 """Show an informational message advising the user to mask one of the
495 the packages. In some cases it may be possible to resolve this
496 automatically, but support for backtracking (removal nodes that have
497 already been selected) will be required in order to handle all possible
# NOTE(review): the docstring close and several other original lines
# are missing from this paste (e.g. 498-500, 502-503, 505, 508, 511,
# 513, 515-516, 518-520, 537, 541-542), including the early return
# when there is no conflict info, the explanation-found branch, and
# the "msg = []" initializers.
501 if not self._dynamic_config._slot_collision_info:
504 self._show_merge_list()
# Delegate conflict analysis to the dedicated handler and print
# its summary; an explanation (with suggested USE changes) is
# printed instead of the generic advice when one is found.
506 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
507 handler = self._dynamic_config._slot_conflict_handler
509 conflict = handler.get_conflict()
510 writemsg(conflict, noiselevel=-1)
512 explanation = handler.get_explanation()
514 writemsg(explanation, noiselevel=-1)
517 if "--quiet" in self._frozen_config.myopts:
# Generic advice shown when no automatic explanation is available.
521 msg.append("It may be possible to solve this problem ")
522 msg.append("by using package.mask to prevent one of ")
523 msg.append("those packages from being selected. ")
524 msg.append("However, it is also possible that conflicting ")
525 msg.append("dependencies exist such that they are impossible to ")
526 msg.append("satisfy simultaneously. If such a conflict exists in ")
527 msg.append("the dependencies of two different packages, then those ")
528 msg.append("packages can not be installed simultaneously.")
# Suggest raising --backtrack only when backtracking is disabled
# or configured below the suggested threshold of 30.
529 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
530 if not self._dynamic_config._allow_backtracking and \
531 (backtrack_opt is None or \
532 (backtrack_opt > 0 and backtrack_opt < 30)):
533 msg.append(" You may want to try a larger value of the ")
534 msg.append("--backtrack option, such as --backtrack=30, ")
535 msg.append("in order to see if that will solve this conflict ")
536 msg.append("automatically.")
538 for line in textwrap.wrap(''.join(msg), 70):
539 writemsg(line + '\n', noiselevel=-1)
540 writemsg('\n', noiselevel=-1)
543 msg.append("For more information, see MASKED PACKAGES ")
544 msg.append("section in the emerge man page or refer ")
545 msg.append("to the Gentoo Handbook.")
546 for line in textwrap.wrap(''.join(msg), 70):
547 writemsg(line + '\n', noiselevel=-1)
548 writemsg('\n', noiselevel=-1)
550 def _process_slot_conflicts(self):
551 """
552 Process slot conflict data to identify specific atoms which
553 lead to conflict. These atoms only match a subset of the
554 packages that have been pulled into a given slot.
# NOTE(review): the docstring close (original 555) and several body
# lines are missing from this paste (e.g. 558, 562-563, 565, 569,
# 573, 581, 583), including the "continue" statements and the
# "parent_atoms = set()" fallback when a pkg has no recorded parents.
556 for (slot_atom, root), slot_nodes \
557 in self._dynamic_config._slot_collision_info.items():
# Union of parent atoms across every package in the conflicted slot.
559 all_parent_atoms = set()
560 for pkg in slot_nodes:
561 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
564 all_parent_atoms.update(parent_atoms)
# Second pass: attribute each parent atom to every slot package it
# matches; atoms that match only a subset mark the actual conflict.
566 for pkg in slot_nodes:
567 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
568 if parent_atoms is None:
570 self._dynamic_config._parent_atoms[pkg] = parent_atoms
571 for parent_atom in all_parent_atoms:
572 if parent_atom in parent_atoms:
574 # Use package set for matching since it will match via
575 # PROVIDE when necessary, while match_from_list does not.
576 parent, atom = parent_atom
577 atom_set = InternalPackageSet(
578 initial_atoms=(atom,))
579 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
580 parent_atoms.add(parent_atom)
# An atom that fails to match one of the slot packages is a
# conflict atom (original "else:" on line 581 is missing here).
582 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
584 def _reinstall_for_flags(self, forced_flags,
585 orig_use, orig_iuse, cur_use, cur_iuse):
586 """Return a set of flags that trigger reinstallation, or None if there
587 are no such flags."""
# --newuse / --binpkg-respect-use: a reinstall is triggered both by
# IUSE changes (minus flags forced by the profile) and by changes in
# the enabled subset of IUSE.
588 if "--newuse" in self._frozen_config.myopts or \
589 "--binpkg-respect-use" in self._frozen_config.myopts:
590 flags = set(orig_iuse.symmetric_difference(
591 cur_iuse).difference(forced_flags))
592 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
593 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes to enabled IUSE flags count.
596 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
597 flags = orig_iuse.intersection(orig_use).symmetric_difference(
598 cur_iuse.intersection(cur_use))
# NOTE(review): the return statements (original 594-595 and 599-602,
# presumably "if flags: return flags" per branch and a final
# "return None") are missing from this paste -- restore from upstream.
# Main worklist loop: drain the dependency stack (Package nodes get
# their deps expanded, Dependency objects get resolved/added) and the
# disjunctive (|| dep) stack until both are empty.
# NOTE(review): original lines 608, 613-614, 616 and 619-621 are
# missing from this paste, including the "if dep_stack:" guard, the
# "return 0"/"return 1" results, and the else-branch structure.
603 def _create_graph(self, allow_unsatisfied=False):
604 dep_stack = self._dynamic_config._dep_stack
605 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
606 while dep_stack or dep_disjunctive_stack:
607 self._spinner_update()
609 dep = dep_stack.pop()
610 if isinstance(dep, Package):
611 if not self._add_pkg_deps(dep,
612 allow_unsatisfied=allow_unsatisfied):
615 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
617 if dep_disjunctive_stack:
618 if not self._pop_disjunction(allow_unsatisfied):
622 def _expand_set_args(self, input_args, add_to_digraph=False):
# (docstring body follows; its triple-quote delimiters, original
# lines 623 and 636, are missing from this paste)
624 Iterate over a list of DependencyArg instances and yield all
625 instances given in the input together with additional SetArg
626 instances that are generated from nested sets.
627 @param input_args: An iterable of DependencyArg instances
628 @type input_args: Iterable
629 @param add_to_digraph: If True then add SetArg instances
630 to the digraph, in order to record parent -> child
631 relationships from nested sets
632 @type add_to_digraph: Boolean
634 @returns: All args given in the input together with additional
635 SetArg instances that are generated from nested sets
# NOTE(review): several body lines are missing from this paste (e.g.
# 637, 639, 642-644, 647-648, 651, 653-654, 660, 669, 673), including
# the yields of each arg, the "arg_stack" initialization, and the
# "continue" statements for non-SetArg and already-traversed args.
638 traversed_set_args = set()
640 for arg in input_args:
641 if not isinstance(arg, SetArg):
645 root_config = arg.root_config
646 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Depth-first expansion of nested sets via an explicit stack.
649 arg = arg_stack.pop()
650 if arg in traversed_set_args:
652 traversed_set_args.add(arg)
655 # Traverse nested sets and add them to the stack
656 # if they're not already in the graph. Also, graph
657 # edges between parent and nested sets.
658 for token in arg.pset.getNonAtoms():
659 if not token.startswith(SETPREFIX):
661 s = token[len(SETPREFIX):]
# Prefer a set already attached to the graph; fall back to the
# root's configured sets.
662 nested_set = depgraph_sets.sets.get(s)
663 if nested_set is None:
664 nested_set = root_config.sets.get(s)
665 if nested_set is not None:
666 nested_arg = SetArg(arg=token, pset=nested_set,
667 root_config=root_config)
668 arg_stack.append(nested_arg)
# Record the parent -> nested-set edge when requested.
670 self._dynamic_config.digraph.add(nested_arg, arg,
671 priority=BlockerDepPriority.instance)
672 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# Resolve a single Dependency object: blockers are recorded as Blocker
# nodes, normal deps are resolved to a package via self._select_package
# and added to the graph, and unsatisfiable deps either trigger
# backtracking (via _runtime_pkg_mask) or are queued for display.
# NOTE(review): many original lines are missing from this paste (e.g.
# 681, 683, 688, 695-696, 700, 703, 706-707, 711, 714, 717, 724, 726,
# 729, 735, 741-743, 749, 752-754, 756-758), including the
# "if dep.blocker:" branch header, several return statements, and the
# fallback handling between the numbered fragments below; restore
# from upstream before relying on this text.
674 def _add_dep(self, dep, allow_unsatisfied=False):
675 debug = "--debug" in self._frozen_config.myopts
676 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
677 nodeps = "--nodeps" in self._frozen_config.myopts
678 empty = "empty" in self._dynamic_config.myparams
679 deep = self._dynamic_config.myparams.get("deep", 0)
# deep is True means unlimited depth; otherwise compare depths.
680 recurse = empty or deep is True or dep.depth <= deep
682 if not buildpkgonly and \
684 dep.parent not in self._dynamic_config._slot_collision_nodes:
685 if dep.parent.onlydeps:
686 # It's safe to ignore blockers if the
687 # parent is an --onlydeps node.
689 # The blocker applies to the root where
690 # the parent is or will be installed.
691 blocker = Blocker(atom=dep.atom,
692 eapi=dep.parent.metadata["EAPI"],
693 priority=dep.priority, root=dep.parent.root)
694 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
697 if dep.child is None:
698 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
699 onlydeps=dep.onlydeps)
701 # The caller has selected a specific package
702 # via self._minimize_packages().
704 existing_node = self._dynamic_config._slot_pkg_map[
705 dep.root].get(dep_pkg.slot_atom)
# No package satisfied the atom:
708 if dep.priority.optional:
709 # This could be an unnecessary build-time dep
710 # pulled in by --with-bdeps=y.
712 if allow_unsatisfied:
713 self._dynamic_config._unsatisfied_deps.append(dep)
715 self._dynamic_config._unsatisfied_deps_for_display.append(
716 ((dep.root, dep.atom), {"myparent":dep.parent}))
718 # The parent node should not already be in
719 # runtime_pkg_mask, since that would trigger an
720 # infinite backtracking loop.
721 if self._dynamic_config._allow_backtracking:
722 if dep.parent in self._dynamic_config._runtime_pkg_mask:
723 if "--debug" in self._frozen_config.myopts:
725 "!!! backtracking loop detected: %s %s\n" % \
727 self._dynamic_config._runtime_pkg_mask[
728 dep.parent]), noiselevel=-1)
730 # Do not backtrack if only USE have to be changed in
731 # order to satisfy the dependency.
732 dep_pkg, existing_node = \
733 self._select_package(dep.root, dep.atom.without_use,
734 onlydeps=dep.onlydeps)
# Mask the parent so the next backtracking attempt explores a
# different choice, and flag that a restart is needed.
736 self._dynamic_config._runtime_pkg_mask.setdefault(
737 dep.parent, {})["missing dependency"] = \
738 set([(dep.parent, dep.root, dep.atom)])
739 self._dynamic_config._need_restart = True
740 if "--debug" in self._frozen_config.myopts:
744 msg.append("backtracking due to unsatisfied dep:")
745 msg.append(" parent: %s" % dep.parent)
746 msg.append(" priority: %s" % dep.priority)
747 msg.append(" root: %s" % dep.root)
748 msg.append(" atom: %s" % dep.atom)
750 writemsg_level("".join("%s\n" % l for l in msg),
751 noiselevel=-1, level=logging.DEBUG)
# Success path: add the selected package (and its deps) to the graph.
755 if not self._add_pkg(dep_pkg, dep):
# Check whether pkg collides with a different package already occupying
# its slot. Returns (existing_node, matches) where matches indicates
# whether the existing node also satisfies the given atom (so it can be
# reused instead of causing a conflict).
# NOTE(review): original lines 761-762, 765 and 771 are missing from
# this paste (presumably the "matches = None" default, the
# "if existing_node:" guard, and the atom-is-not-None condition that
# completes the "and" on line 764).
759 def _check_slot_conflict(self, pkg, atom):
760 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
763 matches = pkg.cpv == existing_node.cpv
764 if pkg != existing_node and \
766 # Use package set for matching since it will match via
767 # PROVIDE when necessary, while match_from_list does not.
768 matches = bool(InternalPackageSet(initial_atoms=(atom,),
769 ).findAtomForPackage(existing_node,
770 modified_use=self._pkg_use_enabled(existing_node)))
772 return (existing_node, matches)
774 def _add_pkg(self, pkg, dep):
781 myparent = dep.parent
782 priority = dep.priority
785 priority = DepPriority()
787 Fills the digraph with nodes comprised of packages to merge.
788 mybigkey is the package spec of the package to merge.
789 myparent is the package depending on mybigkey ( or None )
790 addme = Should we add this package to the digraph or are we just looking at it's deps?
791 Think --onlydeps, we need to ignore packages in that case.
794 #IUSE-aware emerge -> USE DEP aware depgraph
795 #"no downgrade" emerge
797 # Ensure that the dependencies of the same package
798 # are never processed more than once.
799 previously_added = pkg in self._dynamic_config.digraph
801 # select the correct /var database that we'll be checking against
802 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
803 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
808 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
809 except portage.exception.InvalidDependString as e:
810 if not pkg.installed:
811 # should have been masked before it was selected
815 # NOTE: REQUIRED_USE checks are delayed until after
816 # package selection, since we want to prompt the user
817 # for USE adjustment rather than have REQUIRED_USE
818 # affect package selection and || dep choices.
819 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
820 eapi_has_required_use(pkg.metadata["EAPI"]):
821 required_use_is_sat = check_required_use(
822 pkg.metadata["REQUIRED_USE"],
823 self._pkg_use_enabled(pkg),
824 pkg.iuse.is_valid_flag)
825 if not required_use_is_sat:
826 if dep.atom is not None and dep.parent is not None:
827 self._add_parent_atom(pkg, (dep.parent, dep.atom))
830 for parent_atom in arg_atoms:
831 parent, atom = parent_atom
832 self._add_parent_atom(pkg, parent_atom)
836 atom = Atom("=" + pkg.cpv)
837 self._dynamic_config._unsatisfied_deps_for_display.append(
838 ((pkg.root, atom), {"myparent":dep.parent}))
839 self._dynamic_config._skip_restart = True
843 if not pkg.installed and \
844 "empty" not in self._dynamic_config.myparams and \
845 vardbapi.match(pkg.slot_atom):
846 # Increase the priority of dependencies on packages that
847 # are being rebuilt. This optimizes merge order so that
848 # dependencies are rebuilt/updated as soon as possible,
849 # which is needed especially when emerge is called by
850 # revdep-rebuild since dependencies may be affected by ABI
851 # breakage that has rendered them useless. Don't adjust
852 # priority here when in "empty" mode since all packages
853 # are being merged in that case.
854 priority.rebuild = True
856 existing_node, existing_node_matches = \
857 self._check_slot_conflict(pkg, dep.atom)
858 slot_collision = False
860 if existing_node_matches:
861 # The existing node can be reused.
863 for parent_atom in arg_atoms:
864 parent, atom = parent_atom
865 self._dynamic_config.digraph.add(existing_node, parent,
867 self._add_parent_atom(existing_node, parent_atom)
868 # If a direct circular dependency is not an unsatisfied
869 # buildtime dependency then drop it here since otherwise
870 # it can skew the merge order calculation in an unwanted
872 if existing_node != myparent or \
873 (priority.buildtime and not priority.satisfied):
874 self._dynamic_config.digraph.addnode(existing_node, myparent,
876 if dep.atom is not None and dep.parent is not None:
877 self._add_parent_atom(existing_node,
878 (dep.parent, dep.atom))
881 # A slot conflict has occurred.
882 # The existing node should not already be in
883 # runtime_pkg_mask, since that would trigger an
884 # infinite backtracking loop.
885 if self._dynamic_config._allow_backtracking and \
887 self._dynamic_config._runtime_pkg_mask:
888 if "--debug" in self._frozen_config.myopts:
890 "!!! backtracking loop detected: %s %s\n" % \
892 self._dynamic_config._runtime_pkg_mask[
893 existing_node]), noiselevel=-1)
894 elif self._dynamic_config._allow_backtracking and \
895 not self._accept_blocker_conflicts():
896 self._add_slot_conflict(pkg)
897 if dep.atom is not None and dep.parent is not None:
898 self._add_parent_atom(pkg, (dep.parent, dep.atom))
900 for parent_atom in arg_atoms:
901 parent, atom = parent_atom
902 self._add_parent_atom(pkg, parent_atom)
903 self._process_slot_conflicts()
908 # The ordering of backtrack_data can make
909 # a difference here, because both mask actions may lead
910 # to valid, but different, solutions and the one with
911 # 'existing_node' masked is usually the better one. Because
912 # of that, we choose an order such that
913 # the backtracker will first explore the choice with
914 # existing_node masked. The backtracker reverses the
915 # order, so the order it uses is the reverse of the
916 # order shown here. See bug #339606.
917 for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
918 # For missed update messages, find out which
919 # atoms matched to_be_selected that did not
920 # match to_be_masked.
922 self._dynamic_config._parent_atoms.get(to_be_selected, set())
924 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
926 parent_atoms = conflict_atoms
928 all_parents.update(parent_atoms)
931 for parent, atom in parent_atoms:
932 i = InternalPackageSet(initial_atoms=(atom,))
933 if not i.findAtomForPackage(to_be_masked):
937 if to_be_selected >= to_be_masked:
938 # We only care about the parent atoms
939 # when they trigger a downgrade.
942 fallback_data.append((to_be_masked, parent_atoms))
945 # 'to_be_masked' does not violate any parent atom, which means
946 # there is no point in masking it.
949 backtrack_data.append((to_be_masked, parent_atoms))
951 if not backtrack_data:
952 # This shouldn't happen, but fall back to the old
953 # behavior if this gets triggered somehow.
954 backtrack_data = fallback_data
956 if len(backtrack_data) > 1:
957 # NOTE: Generally, we prefer to mask the higher
958 # version since this solves common cases in which a
959 # lower version is needed so that all dependencies
960 # will be satisfied (bug #337178). However, if
961 # existing_node happens to be installed then we
962 # mask that since this is a common case that is
963 # triggered when --update is not enabled.
964 if existing_node.installed:
966 elif pkg > existing_node:
967 backtrack_data.reverse()
969 to_be_masked, parent_atoms = backtrack_data[-1]
971 self._dynamic_config._runtime_pkg_mask.setdefault(
972 to_be_masked, {})["slot conflict"] = parent_atoms
973 self._dynamic_config._need_restart = True
974 if "--debug" in self._frozen_config.myopts:
978 msg.append("backtracking due to slot conflict:")
979 if backtrack_data is fallback_data:
980 msg.append("!!! backtrack_data fallback")
981 msg.append(" first package: %s" % existing_node)
982 msg.append(" second package: %s" % pkg)
983 msg.append(" package to mask: %s" % to_be_masked)
984 msg.append(" slot: %s" % pkg.slot_atom)
985 msg.append(" parents: %s" % ", ".join( \
986 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
988 writemsg_level("".join("%s\n" % l for l in msg),
989 noiselevel=-1, level=logging.DEBUG)
992 # A slot collision has occurred. Sometimes this coincides
993 # with unresolvable blockers, so the slot collision will be
994 # shown later if there are no unresolvable blockers.
995 self._add_slot_conflict(pkg)
996 slot_collision = True
999 # Now add this node to the graph so that self.display()
1000 # can show use flags and --tree portage.output. This node is
1001 # only being partially added to the graph. It must not be
1002 # allowed to interfere with the other nodes that have been
1003 # added. Do not overwrite data for existing nodes in
1004 # self._dynamic_config.mydbapi since that data will be used for blocker
1006 # Even though the graph is now invalid, continue to process
1007 # dependencies so that things like --fetchonly can still
1008 # function despite collisions.
1010 elif not previously_added:
1011 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1012 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1013 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1014 self._dynamic_config._highest_pkg_cache.clear()
1015 self._check_masks(pkg)
1017 if not pkg.installed:
1018 # Allow this package to satisfy old-style virtuals in case it
1019 # doesn't already. Any pre-existing providers will be preferred
1022 pkgsettings.setinst(pkg.cpv, pkg.metadata)
1023 # For consistency, also update the global virtuals.
1024 settings = self._frozen_config.roots[pkg.root].settings
1026 settings.setinst(pkg.cpv, pkg.metadata)
1028 except portage.exception.InvalidDependString as e:
1029 if not pkg.installed:
1030 # should have been masked before it was selected
1034 self._dynamic_config._set_nodes.add(pkg)
1036 # Do this even when addme is False (--onlydeps) so that the
1037 # parent/child relationship is always known in case
1038 # self._show_slot_collision_notice() needs to be called later.
1039 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1040 if dep.atom is not None and dep.parent is not None:
1041 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1044 for parent_atom in arg_atoms:
1045 parent, atom = parent_atom
1046 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1047 self._add_parent_atom(pkg, parent_atom)
1049 """ This section determines whether we go deeper into dependencies or not.
1050 We want to go deeper on a few occasions:
1051 Installing package A, we need to make sure package A's deps are met.
1052 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1053 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1058 deep = self._dynamic_config.myparams.get("deep", 0)
1059 empty = "empty" in self._dynamic_config.myparams
1060 recurse = empty or deep is True or depth + 1 <= deep
1061 dep_stack = self._dynamic_config._dep_stack
1062 if "recurse" not in self._dynamic_config.myparams:
1064 elif pkg.installed and not recurse:
1065 dep_stack = self._dynamic_config._ignored_deps
1067 self._spinner_update()
1069 if not previously_added:
1070 dep_stack.append(pkg)
1073 def _check_masks(self, pkg):
1075 slot_key = (pkg.root, pkg.slot_atom)
1077 # Check for upgrades in the same slot that are
1078 # masked due to a LICENSE change in a newer
1079 # version that is not masked for any other reason.
1080 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1081 if other_pkg is not None and pkg < other_pkg:
1082 self._dynamic_config._masked_license_updates.add(other_pkg)
1084 def _add_parent_atom(self, pkg, parent_atom):
1085 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1086 if parent_atoms is None:
1087 parent_atoms = set()
1088 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1089 parent_atoms.add(parent_atom)
1091 def _add_slot_conflict(self, pkg):
1092 self._dynamic_config._slot_collision_nodes.add(pkg)
1093 slot_key = (pkg.slot_atom, pkg.root)
1094 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1095 if slot_nodes is None:
1097 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1098 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		# Gather pkg's DEPEND/RDEPEND/PDEPEND strings, adjust them for the
		# active options (--buildpkgonly, --with-bdeps, --root-deps,
		# removal actions), then hand each dep string to
		# _add_pkg_dep_string().
		#
		# NOTE(review): this copy of the file is missing several source
		# lines (the embedded original numbering is discontinuous);
		# dangling clauses below are flagged rather than reconstructed.

		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = self._pkg_use_enabled(pkg)
		depth = pkg.depth + 1
		removal_action = "remove" in self._dynamic_config.myparams

		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
		# NOTE(review): missing here: the edepend dict initialization and
		# the `for k in depkeys:` header this assignment belongs to.
		edepend[k] = metadata[k]

		# --buildpkgonly without deep/empty: runtime deps are dropped.
		if not pkg.built and \
			"--buildpkgonly" in self._frozen_config.myopts and \
			"deep" not in self._dynamic_config.myparams and \
			"empty" not in self._dynamic_config.myparams:
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""

		if pkg.built and not removal_action:
			if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				# NOTE(review): missing lines here — presumably the end of
				# the comment above and the `else:` branch the next two
				# lines belong to.
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""

		if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

		# NOTE(review): missing the conditional header around depend_root
		# (and the alternative branch that the --root-deps handling below
		# belongs to).
		depend_root = myroot
		root_deps = self._frozen_config.myopts.get("--root-deps")
		if root_deps is not None:
			if root_deps is True:
				depend_root = myroot
			elif root_deps == "rdeps":
				# --root-deps=rdeps: ignore build-time deps entirely.
				edepend["DEPEND"] = ""

		# (root, dep string, priority, ...) entries per dep class.
		# NOTE(review): the enclosing literal and its closing elements are
		# missing from this copy.
		(depend_root, edepend["DEPEND"],
			self._priority(buildtime=(not pkg.built),
			optional=pkg.built),
		(myroot, edepend["RDEPEND"],
			self._priority(runtime=True),
		(myroot, edepend["PDEPEND"],
			self._priority(runtime_post=True),

		debug = "--debug" in self._frozen_config.myopts
		strict = mytype != "installed"

		for dep_root, dep_string, dep_priority, ignore_blockers in deps:
			# NOTE(review): an `if debug:` guard for the logging below
			# appears to be missing in this copy.
			writemsg_level("\nParent: %s\n" % (pkg,),
				noiselevel=-1, level=logging.DEBUG)
			writemsg_level("Depstring: %s\n" % (dep_string,),
				noiselevel=-1, level=logging.DEBUG)
			writemsg_level("Priority: %s\n" % (dep_priority,),
				noiselevel=-1, level=logging.DEBUG)

			# NOTE(review): missing `try:` header for the except below.
			dep_string = portage.dep.use_reduce(dep_string,
				uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
			except portage.exception.InvalidDependString as e:
				if not pkg.installed:
					# should have been masked before it was selected
					# NOTE(review): missing statement(s) here.
				# Try again, but omit the is_valid_flag argument, since
				# invalid USE conditionals are a common problem and it's
				# practical to ignore this issue for installed packages.
				# NOTE(review): missing `try:` header here as well.
				dep_string = portage.dep.use_reduce(dep_string,
					uselist=self._pkg_use_enabled(pkg))
				except portage.exception.InvalidDependString as e:
					self._dynamic_config._masked_installed.add(pkg)

			# Separate out disjunctive deps (|| groups and old-style
			# virtuals) for deferred processing.
			dep_string = list(self._queue_disjunctive_deps(
				pkg, dep_root, dep_priority, dep_string))
			except portage.exception.InvalidDependString as e:
				self._dynamic_config._masked_installed.add(pkg)
				# should have been masked before it was selected

			dep_string = portage.dep.paren_enclose(dep_string,
				unevaluated_atom=True)

			if not self._add_pkg_dep_string(
				pkg, dep_root, dep_priority, dep_string,
				allow_unsatisfied, ignore_blockers=ignore_blockers):
				# NOTE(review): missing failure return here.

		self._dynamic_config._traversed_pkg_deps.add(pkg)
		# NOTE(review): missing success return here.
	def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
		allow_unsatisfied, ignore_blockers=False):
		# Resolve dep_string into concrete atom/package selections via
		# _select_atoms() and add each resulting Dependency to the graph
		# with _add_dep(). Indirect virtual deps are added afterwards to
		# preserve real parent/child relationships.
		#
		# NOTE(review): this copy is missing several source lines;
		# dangling clauses are flagged rather than reconstructed.
		depth = pkg.depth + 1
		debug = "--debug" in self._frozen_config.myopts
		strict = pkg.type_name != "installed"

		# NOTE(review): missing `if debug:` guard for this logging.
		writemsg_level("\nParent: %s\n" % (pkg,),
			noiselevel=-1, level=logging.DEBUG)
		writemsg_level("Depstring: %s\n" % (dep_string,),
			noiselevel=-1, level=logging.DEBUG)
		writemsg_level("Priority: %s\n" % (dep_priority,),
			noiselevel=-1, level=logging.DEBUG)

		# NOTE(review): missing `try:` header for the except below.
		selected_atoms = self._select_atoms(dep_root,
			dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
			strict=strict, priority=dep_priority)
		except portage.exception.InvalidDependString as e:
			# Installed packages with invalid deps get masked rather
			# than treated as fatal.
			self._dynamic_config._masked_installed.add(pkg)
			# should have been masked before it was selected

		writemsg_level("Candidates: %s\n" % \
			([str(x) for x in selected_atoms[pkg]],),
			noiselevel=-1, level=logging.DEBUG)

		root_config = self._frozen_config.roots[dep_root]
		vardb = root_config.trees["vartree"].dbapi

		for atom, child in self._minimize_children(
			pkg, dep_priority, root_config, selected_atoms[pkg]):

			# If this was a specially generated virtual atom
			# from dep_check, map it back to the original, in
			# order to avoid distortion in places like display
			# or conflict resolution code.
			atom = getattr(atom, '_orig_atom', atom)

			if ignore_blockers and atom.blocker:
				# For --with-bdeps, ignore build-time only blockers
				# that originate from built packages.
				# NOTE(review): missing `continue` here.

			mypriority = dep_priority.copy()
			if not atom.blocker:
				inst_pkgs = vardb.match_pkgs(atom)
				# Mark the priority satisfied when an installed package
				# matches; a visible match is preferred over the highest.
				for inst_pkg in inst_pkgs:
					if self._pkg_visibility_check(inst_pkg):
						mypriority.satisfied = inst_pkg
					if not mypriority.satisfied:
						# none visible, so use highest
						mypriority.satisfied = inst_pkgs[0]

			if not self._add_dep(Dependency(atom=atom,
				blocker=atom.blocker, child=child, depth=depth, parent=pkg,
				priority=mypriority, root=dep_root),
				allow_unsatisfied=allow_unsatisfied):
				# NOTE(review): missing failure return here.

		selected_atoms.pop(pkg)

		# Add selected indirect virtual deps to the graph. This
		# takes advantage of circular dependency avoidance that's done
		# by dep_zapdeps. We preserve actual parent/child relationships
		# here in order to avoid distorting the dependency graph like
		# <=portage-2.1.6.x did.
		for virt_dep, atoms in selected_atoms.items():
			virt_pkg = virt_dep.child

			# NOTE(review): missing `if debug:` guard.
			writemsg_level("Candidates: %s: %s\n" % \
				(virt_pkg.cpv, [str(x) for x in atoms]),
				noiselevel=-1, level=logging.DEBUG)

			if not self._add_pkg(virt_pkg, virt_dep):
				# NOTE(review): missing failure handling here.

			for atom, child in self._minimize_children(
				pkg, self._priority(runtime=True), root_config, atoms):

				# If this was a specially generated virtual atom
				# from dep_check, map it back to the original, in
				# order to avoid distortion in places like display
				# or conflict resolution code.
				atom = getattr(atom, '_orig_atom', atom)

				# This is a GLEP 37 virtual, so its deps are all runtime.
				mypriority = self._priority(runtime=True)
				if not atom.blocker:
					inst_pkgs = vardb.match_pkgs(atom)
					for inst_pkg in inst_pkgs:
						if self._pkg_visibility_check(inst_pkg):
							mypriority.satisfied = inst_pkg
						if not mypriority.satisfied:
							# none visible, so use highest
							mypriority.satisfied = inst_pkgs[0]

				if not self._add_dep(Dependency(atom=atom,
					blocker=atom.blocker, child=child, depth=virt_pkg.depth,
					parent=virt_pkg, priority=mypriority, root=dep_root),
					allow_unsatisfied=allow_unsatisfied):
					# NOTE(review): missing failure return here.

		# NOTE(review): missing `if debug:` guard and final return.
		writemsg_level("Exiting... %s\n" % (pkg,),
			noiselevel=-1, level=logging.DEBUG)
	def _minimize_children(self, parent, priority, root_config, atoms):
		"""
		Selects packages to satisfy the given atoms, and minimizes the
		number of selected packages. This serves to identify and eliminate
		redundant package selections when multiple atoms happen to specify
		"""
		# NOTE(review): the docstring tail and several code lines
		# (initializations, loop headers, yields, breaks) are missing
		# from this copy; dangling clauses are flagged below.

		# NOTE(review): missing the loop header over `atoms` and the
		# atom_pkg_map initialization for these lines.
			dep_pkg, existing_node = self._select_package(
				root_config.root, atom)
			atom_pkg_map[atom] = dep_pkg

		# With fewer than two selections there is nothing to minimize.
		if len(atom_pkg_map) < 2:
			for item in atom_pkg_map.items():

		# Group the atoms by selected package and the packages by cp.
		for atom, pkg in atom_pkg_map.items():
			pkg_atom_map.setdefault(pkg, set()).add(atom)
			cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

		for cp, pkgs in cp_pkg_map.items():
			for atom in pkg_atom_map[pkg]:

			# Use a digraph to identify and eliminate any
			# redundant package selections.
			atom_pkg_graph = digraph()
			for atom in pkg_atom_map[pkg1]:
				atom_pkg_graph.add(pkg1, atom)
				atom_set = InternalPackageSet(initial_atoms=(atom,))
				if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
					atom_pkg_graph.add(pkg2, atom)

			# A package can be dropped only if every atom it satisfies
			# is also satisfied by another selected package.
			eliminate_pkg = True
			for atom in atom_pkg_graph.parent_nodes(pkg):
				if len(atom_pkg_graph.child_nodes(atom)) < 2:
					eliminate_pkg = False
				atom_pkg_graph.remove(pkg)

			# Yield ~, =*, < and <= atoms first, since those are more likely to
			# cause slot conflicts, and we want those atoms to be displayed
			# in the resulting slot conflict message (see bug #291142).
			for atom in cp_atoms:
				for child_pkg in atom_pkg_graph.child_nodes(atom):
					existing_node, matches = \
						self._check_slot_conflict(child_pkg, atom)
					if existing_node and not matches:
						conflict_atoms.append(atom)
						normal_atoms.append(atom)

			for atom in chain(conflict_atoms, normal_atoms):
				child_pkgs = atom_pkg_graph.child_nodes(atom)
				# if more than one child, yield highest version
				if len(child_pkgs) > 1:
					yield (atom, child_pkgs[-1])
	def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
		"""
		Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
		Yields non-disjunctive deps. Raises InvalidDependString when
		"""
		# NOTE(review): the docstring tail and several code lines (index
		# initialization/advance, elif/else branches, yields, `try:`
		# headers) are missing from this copy; dangling clauses flagged.
		while i < len(dep_struct):
			if isinstance(x, list):
				# Recurse into nested dependency groups.
				for y in self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, x):
				# An "||" choice group is queued together with the
				# element that follows it.
				self._queue_disjunction(pkg, dep_root, dep_priority,
					[ x, dep_struct[ i + 1 ] ] )
				x = portage.dep.Atom(x)
			except portage.exception.InvalidAtom:
				if not pkg.installed:
					raise portage.exception.InvalidDependString(
						"invalid atom: '%s'" % x)
				# Note: Eventually this will check for PROPERTIES=virtual
				# or whatever other metadata gets implemented for this
				# Old-style virtuals are disjunctive, so queue them too.
				if x.cp.startswith('virtual/'):
					self._queue_disjunction( pkg, dep_root,
						dep_priority, [ str(x) ] )
1469 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1470 self._dynamic_config._dep_disjunctive_stack.append(
1471 (pkg, dep_root, dep_priority, dep_struct))
	def _pop_disjunction(self, allow_unsatisfied):
		"""
		Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
		populate self._dynamic_config._dep_stack.
		"""
		pkg, dep_root, dep_priority, dep_struct = \
			self._dynamic_config._dep_disjunctive_stack.pop()
		# Re-serialize the structure so _add_pkg_dep_string() can parse it.
		dep_string = portage.dep.paren_enclose(dep_struct,
			unevaluated_atom=True)
		if not self._add_pkg_dep_string(
			pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
			# NOTE(review): the failure/success return statements are
			# missing from this copy.
1487 def _priority(self, **kwargs):
1488 if "remove" in self._dynamic_config.myparams:
1489 priority_constructor = UnmergeDepPriority
1491 priority_constructor = DepPriority
1492 return priority_constructor(**kwargs)
1494 def _dep_expand(self, root_config, atom_without_category):
1496 @param root_config: a root config instance
1497 @type root_config: RootConfig
1498 @param atom_without_category: an atom without a category component
1499 @type atom_without_category: String
1501 @returns: a list of atoms containing categories (possibly empty)
1503 null_cp = portage.dep_getkey(insert_category_into_atom(
1504 atom_without_category, "null"))
1505 cat, atom_pn = portage.catsplit(null_cp)
1507 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1509 for db, pkg_type, built, installed, db_keys in dbs:
1510 for cat in db.categories:
1511 if db.cp_list("%s/%s" % (cat, atom_pn)):
1515 for cat in categories:
1516 deps.append(Atom(insert_category_into_atom(
1517 atom_without_category, cat)))
1520 def _have_new_virt(self, root, atom_cp):
1522 for db, pkg_type, built, installed, db_keys in \
1523 self._dynamic_config._filtered_trees[root]["dbs"]:
1524 if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		# Walk the command-line argument atoms that match pkg, filtering
		# out matches that a higher version in another slot should take.
		# NOTE(review): several lines (continue/break statements, the
		# final yield) are missing from this copy; dangling clauses are
		# flagged rather than reconstructed.
		depgraph_sets = self._dynamic_config.sets[pkg.root]
		atom_arg_map = depgraph_sets.atom_arg_map
		root_config = self._frozen_config.roots[pkg.root]
		for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
			# Skip old-style virtual atoms when a new-style provider
			# exists for the same cp.
			if atom.cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom.cp):
			# NOTE(review): missing assignment target for the visible
			# match_pkgs() expression below.
				self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom.cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
		self._dynamic_config._initial_arg_list and call self._resolve to create the
		appropriate depgraph and return a favorite list."""
		# NOTE(review): this copy of the method is missing many source
		# lines (loop headers, else-branches, return statements); the
		# comments below annotate the surviving code only and flag the
		# most disruptive gaps.
		debug = "--debug" in self._frozen_config.myopts
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		sets = root_config.sets
		depgraph_sets = self._dynamic_config.sets[root_config.root]
		myroot = self._frozen_config.target_root
		dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
		vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
		real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
		portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
		bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
		pkgsettings = self._frozen_config.pkgsettings[myroot]
		onlydeps = "--onlydeps" in self._frozen_config.myopts
		# NOTE(review): missing here: args/lookup_owners initialization
		# and the `for x in myfiles:` loop header.
		ext = os.path.splitext(x)[1]
		# --- Binary package argument (.tbz2): locate it under PKGDIR.
		if not os.path.exists(x):
			os.path.join(pkgsettings["PKGDIR"], "All", x)):
			x = os.path.join(pkgsettings["PKGDIR"], "All", x)
		elif os.path.exists(
			os.path.join(pkgsettings["PKGDIR"], x)):
			x = os.path.join(pkgsettings["PKGDIR"], x)
		writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
		writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
		return 0, myfavorites
		mytbz2=portage.xpak.tbz2(x)
		mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
		# The tbz2 must live where bintree expects it for mykey.
		if os.path.realpath(x) != \
			os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
			writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
			self._dynamic_config._skip_restart = True
			return 0, myfavorites

		pkg = self._pkg(mykey, "binary", root_config,
		args.append(PackageArg(arg=x, package=pkg,
			root_config=root_config))
		# --- Ebuild file argument: validate its location in the tree.
		elif ext==".ebuild":
			ebuild_path = portage.util.normalize_path(os.path.abspath(x))
			pkgdir = os.path.dirname(ebuild_path)
			tree_root = os.path.dirname(os.path.dirname(pkgdir))
			cp = pkgdir[len(tree_root)+1:]
			e = portage.exception.PackageNotFound(
				("%s is not in a valid portage tree " + \
				"hierarchy or does not exist") % x)
			if not portage.isvalidatom(cp):
			cat = portage.catsplit(cp)[0]
			mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
			if not portage.isvalidatom("="+mykey):
			ebuild_path = portdb.findname(mykey)
			if ebuild_path != os.path.join(os.path.realpath(tree_root),
				cp, os.path.basename(ebuild_path)):
				writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
				self._dynamic_config._skip_restart = True
				return 0, myfavorites
			if mykey not in portdb.xmatch(
				"match-visible", portage.cpv_getkey(mykey)):
				writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
				writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
				writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
				countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
			raise portage.exception.PackageNotFound(
				"%s is not in a valid portage tree hierarchy or does not exist" % x)
			pkg = self._pkg(mykey, "ebuild", root_config,
			args.append(PackageArg(arg=x, package=pkg,
				root_config=root_config))
		# --- Absolute path: resolve the owning package(s) later.
		elif x.startswith(os.path.sep):
			if not x.startswith(myroot):
				portage.writemsg(("\n\n!!! '%s' does not start with" + \
					" $ROOT.\n") % x, noiselevel=-1)
				self._dynamic_config._skip_restart = True
			# Queue these up since it's most efficient to handle
			# multiple files in a single iter_owners() call.
			lookup_owners.append(x)
		# --- Relative path: likewise, after normalization.
		elif x.startswith("." + os.sep) or \
			x.startswith(".." + os.sep):
			f = os.path.abspath(x)
			if not f.startswith(myroot):
				portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
					" $ROOT.\n") % (f, x), noiselevel=-1)
				self._dynamic_config._skip_restart = True
			lookup_owners.append(f)
		# --- Set or atom argument.
		if x in ("system", "world"):
		if x.startswith(SETPREFIX):
			s = x[len(SETPREFIX):]
			raise portage.exception.PackageSetNotFound(s)
			if s in depgraph_sets.sets:
			depgraph_sets.sets[s] = pset
			args.append(SetArg(arg=x, pset=pset,
				root_config=root_config))
		if not is_valid_package_atom(x):
			portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
			portage.writemsg("!!! Please check ebuild(5) for full details.\n")
			portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
			self._dynamic_config._skip_restart = True
		# Don't expand categories or old-style virtuals here unless
		# necessary. Expansion of old-style virtuals here causes at
		# least the following problems:
		#   1) It's more difficult to determine which set(s) an atom
		#      came from, if any.
		#   2) It takes away freedom from the resolver to choose other
		#      possible expansions when necessary.
		args.append(AtomArg(arg=x, atom=Atom(x),
			root_config=root_config))
		# Category-less atom: expand it against every configured db.
		expanded_atoms = self._dep_expand(root_config, x)
		installed_cp_set = set()
		for atom in expanded_atoms:
			if vardb.cp_list(atom.cp):
				installed_cp_set.add(atom.cp)

		if len(installed_cp_set) > 1:
			non_virtual_cps = set()
			for atom_cp in installed_cp_set:
				if not atom_cp.startswith("virtual/"):
					non_virtual_cps.add(atom_cp)
			if len(non_virtual_cps) == 1:
				installed_cp_set = non_virtual_cps

		if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
			installed_cp = next(iter(installed_cp_set))
			for atom in expanded_atoms:
				if atom.cp == installed_cp:
					for pkg in self._iter_match_pkgs_any(
						root_config, atom.without_use,
						if not pkg.installed:
							expanded_atoms = [atom]

		# If a non-virtual package and one or more virtual packages
		# are in expanded_atoms, use the non-virtual package.
		if len(expanded_atoms) > 1:
			number_of_virtuals = 0
			for expanded_atom in expanded_atoms:
				if expanded_atom.cp.startswith("virtual/"):
					number_of_virtuals += 1
					candidate = expanded_atom
			if len(expanded_atoms) - number_of_virtuals == 1:
				expanded_atoms = [ candidate ]

		if len(expanded_atoms) > 1:
			writemsg("\n\n", noiselevel=-1)
			ambiguous_package_name(x, expanded_atoms, root_config,
				self._frozen_config.spinner, self._frozen_config.myopts)
			self._dynamic_config._skip_restart = True
			return False, myfavorites
		atom = expanded_atoms[0]
		null_atom = Atom(insert_category_into_atom(x, "null"))
		cat, atom_pn = portage.catsplit(null_atom.cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the depgraph to choose which virtual.
		atom = Atom(null_atom.replace('null/', 'virtual/', 1))
		args.append(AtomArg(arg=x, atom=atom,
			root_config=root_config))

		# --- Map queued path arguments to their owning packages.
		search_for_multiple = False
		if len(lookup_owners) > 1:
			search_for_multiple = True

		for x in lookup_owners:
			if not search_for_multiple and os.path.isdir(x):
				search_for_multiple = True
			relative_paths.append(x[len(myroot)-1:])

		for pkg, relative_path in \
			real_vardb._owners.iter_owners(relative_paths):
			owners.add(pkg.mycpv)

		if not search_for_multiple:
			portage.writemsg(("\n\n!!! '%s' is not claimed " + \
				"by any package.\n") % lookup_owners[0], noiselevel=-1)
			self._dynamic_config._skip_restart = True

		slot = vardb.aux_get(cpv, ["SLOT"])[0]
		# portage now masks packages with missing slot, but it's
		# possible that one was installed by an older version
		atom = Atom(portage.cpv_getkey(cpv))
		atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
		args.append(AtomArg(arg=atom, atom=atom,
			root_config=root_config))

		if "--update" in self._frozen_config.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#  1) Initialize args
			#  2) Select packages and generate initial greedy atoms
			#  3) Update args with greedy atoms
			#  4) Select packages and generate greedy atoms again, while
			#     accounting for any blockers between selected packages
			#  5) Update args with revised greedy atoms

			self._set_args(args)
			greedy_args.append(arg)
			if not isinstance(arg, AtomArg):
			for atom in self._greedy_slots(arg.root_config, arg.atom):
				AtomArg(arg=arg.arg, atom=atom,
					root_config=arg.root_config))

			self._set_args(greedy_args)
			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
			revised_greedy_args.append(arg)
			if not isinstance(arg, AtomArg):
			for atom in self._greedy_slots(arg.root_config, arg.atom,
				blocker_lookahead=True):
				revised_greedy_args.append(
					AtomArg(arg=arg.arg, atom=atom,
						root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		self._set_args(args)

		# Record favorites (world-file candidates) from the arguments.
		myfavorites = set(myfavorites)
		if isinstance(arg, (AtomArg, PackageArg)):
			myfavorites.add(arg.atom)
		elif isinstance(arg, SetArg):
			myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

		portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
		self._dynamic_config._initial_arg_list = args[:]

		return self._resolve(myfavorites)
	def _resolve(self, myfavorites):
		"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
		call self._creategraph to process their deps and return
		"""
		# NOTE(review): the docstring tail and many code lines (try:
		# headers, else-branches, continue/break statements) are missing
		# from this copy; dangling clauses are flagged sparingly.
		debug = "--debug" in self._frozen_config.myopts
		onlydeps = "--onlydeps" in self._frozen_config.myopts
		myroot = self._frozen_config.target_root
		pkgsettings = self._frozen_config.pkgsettings[myroot]
		pprovideddict = pkgsettings.pprovideddict
		virtuals = pkgsettings.getvirtuals()
		for arg in self._expand_set_args(
			self._dynamic_config._initial_arg_list,
			add_to_digraph=True):
			for atom in arg.pset.getAtoms():
				self._spinner_update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
				# package.provided entries satisfy the atom directly.
				pprovided = pprovideddict.get(atom.cp)
				if pprovided and portage.match_from_list(atom, pprovided):
					# A provided package has been specified on the command line.
					self._dynamic_config._pprovided_args.append((arg, atom))
				if isinstance(arg, PackageArg):
					if not self._add_pkg(arg.package, dep) or \
						not self._create_graph():
						if not self.need_restart():
							sys.stderr.write(("\n\n!!! Problem " + \
								"resolving dependencies for %s\n") % \
							return 0, myfavorites

					portage.writemsg(" Arg: %s\n Atom: %s\n" % \
						(arg, atom), noiselevel=-1)
				pkg, existing_node = self._select_package(
					myroot, atom, onlydeps=onlydeps)
				# No package selected: check whether an old-style virtual
				# expansion is satisfied via package.provided.
				pprovided_match = False
				for virt_choice in virtuals.get(atom.cp, []):
					expanded_atom = portage.dep.Atom(
						atom.replace(atom.cp, virt_choice.cp, 1))
					pprovided = pprovideddict.get(expanded_atom.cp)
					portage.match_from_list(expanded_atom, pprovided):
					# A provided package has been
					# specified on the command line.
					self._dynamic_config._pprovided_args.append((arg, atom))
					pprovided_match = True

				if not (isinstance(arg, SetArg) and \
					arg.name in ("selected", "system", "world")):
					self._dynamic_config._unsatisfied_deps_for_display.append(
						((myroot, atom), {"myparent" : arg}))
					return 0, myfavorites
				self._dynamic_config._missing_args.append((arg, atom))
				if atom.cp != pkg.cp:
					# For old-style virtuals, we need to repeat the
					# package.provided check against the selected package.
					expanded_atom = atom.replace(atom.cp, pkg.cp)
					pprovided = pprovideddict.get(pkg.cp)
					portage.match_from_list(expanded_atom, pprovided):
					# A provided package has been
					# specified on the command line.
					self._dynamic_config._pprovided_args.append((arg, atom))
				if pkg.installed and "selective" not in self._dynamic_config.myparams:
					self._dynamic_config._unsatisfied_deps_for_display.append(
						((myroot, atom), {"myparent" : arg}))
					# Previous behavior was to bail out in this case, but
					# since the dep is satisfied by the installed package,
					# it's more friendly to continue building the graph
					# and just show a warning message. Therefore, only bail
					# out here if the atom is not from either the system or
					if not (isinstance(arg, SetArg) and \
						arg.name in ("selected", "system", "world")):
						return 0, myfavorites

				# Add the selected package to the graph as soon as possible
				# so that later dep_check() calls can use it as feedback
				# for making more consistent atom selections.
				if not self._add_pkg(pkg, dep):
					if self.need_restart():
					elif isinstance(arg, SetArg):
						writemsg(("\n\n!!! Problem resolving " + \
							"dependencies for %s from %s\n") % \
							(atom, arg.arg), noiselevel=-1)
						writemsg(("\n\n!!! Problem resolving " + \
							"dependencies for %s\n") % \
							(atom,), noiselevel=-1)
						return 0, myfavorites

				except SystemExit as e:
					raise # Needed else can't exit
				except Exception as e:
					writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
					writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))

		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites

		except self._unknown_internal_error:
			return False, myfavorites

		# Any required configuration change (keywords, USE, licenses)
		# on a graph node means resolution did not fully succeed.
		if set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_unstable_keywords) or \
			set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_use_config_changes) or \
			set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_license_changes) :
			#We failed if the user needs to change the configuration
			return False, myfavorites

		# We're true here unless we are missing binaries.
		return (True, myfavorites)
	def _set_args(self, args):
		"""
		Create the "__non_set_args__" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		# NOTE(review): this chunk appears truncated — the initialization of
		# the per-root set_atoms/non_set_atoms dicts and the if/else around
		# atom_group / refs below seem to be missing; confirm against upstream.

		# Reset all per-root argument state so repeated calls start clean.
		for root in self._dynamic_config.sets:
			depgraph_sets = self._dynamic_config.sets[root]
			depgraph_sets.sets.setdefault('__non_set_args__',
				InternalPackageSet()).clear()
			depgraph_sets.atoms.clear()
			depgraph_sets.atom_arg_map.clear()
			set_atoms[root] = []
			non_set_atoms[root] = []

		# We don't add set args to the digraph here since that
		# happens at a later stage and we don't want to make
		# any state changes here that aren't reversed by a
		# another call to this method.
		for arg in self._expand_set_args(args, add_to_digraph=False):
			atom_arg_map = self._dynamic_config.sets[
				arg.root_config.root].atom_arg_map
			if isinstance(arg, SetArg):
				atom_group = set_atoms[arg.root_config.root]
			atom_group = non_set_atoms[arg.root_config.root]
			# Map each (atom, root) key back to the argument(s) that
			# contributed it.
			for atom in arg.pset.getAtoms():
				atom_group.append(atom)
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				atom_arg_map[atom_key] = refs

		# Record the collected atoms on the per-root depgraph sets.
		for root in self._dynamic_config.sets:
			depgraph_sets = self._dynamic_config.sets[root]
			depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
				non_set_atoms.get(root, [])))
			depgraph_sets.sets['__non_set_args__'].update(
				non_set_atoms.get(root, []))

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._dynamic_config._highest_pkg_cache.clear()
		for trees in self._dynamic_config._filtered_trees.values():
			trees["porttree"].dbapi._clear_cache()
	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
		"""
		Return a list of slot atoms corresponding to installed slots that
		differ from the slot of the highest visible match. When
		blocker_lookahead is True, slot atoms that would trigger a blocker
		conflict are automatically discarded, potentially allowing automatic
		uninstallation of older slots when appropriate.
		"""
		# NOTE(review): this chunk appears truncated — the early return when
		# highest_pkg is None, the initialization of the slots set and
		# greedy_pkgs list, and the loop that pops each slot seem to be
		# missing; confirm against upstream before relying on control flow.
		highest_pkg, in_graph = self._select_package(root_config.root, atom)
		if highest_pkg is None:
		vardb = root_config.trees["vartree"].dbapi
		# Collect the SLOT values of installed instances of this cp.
		for cpv in vardb.match(atom):
			# don't mix new virtuals with old virtuals
			if portage.cpv_getkey(cpv) == highest_pkg.cp:
				slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

		slots.add(highest_pkg.metadata["SLOT"])
		slots.remove(highest_pkg.metadata["SLOT"])
		# For each remaining slot, select the best visible package that is
		# older than highest_pkg.
		slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
		pkg, in_graph = self._select_package(root_config.root, slot_atom)
		if pkg is not None and \
			pkg.cp == highest_pkg.cp and pkg < highest_pkg:
			greedy_pkgs.append(pkg)
		if not blocker_lookahead:
			return [pkg.slot_atom for pkg in greedy_pkgs]

		# Build the set of blocker atoms declared by each candidate (and by
		# highest_pkg itself) from its dependency strings.
		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
		for pkg in greedy_pkgs + [highest_pkg]:
			dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
			selected_atoms = self._select_atoms(
				pkg.root, dep_str, self._pkg_use_enabled(pkg),
				parent=pkg, strict=True)
			except portage.exception.InvalidDependString:
			for atoms in selected_atoms.values():
				blocker_atoms.extend(x for x in atoms if x.blocker)
			blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

		if highest_pkg not in blockers:

		# filter packages with invalid deps
		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

		# filter packages that conflict with highest_pkg
		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
			(blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
			blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]

		# If two packages conflict, discard the lower version.
		discard_pkgs = set()
		greedy_pkgs.sort(reverse=True)
		for i in range(len(greedy_pkgs) - 1):
			pkg1 = greedy_pkgs[i]
			if pkg1 in discard_pkgs:
			for j in range(i + 1, len(greedy_pkgs)):
				pkg2 = greedy_pkgs[j]
				if pkg2 in discard_pkgs:
				if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
					blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
					discard_pkgs.add(pkg2)

		return [pkg.slot_atom for pkg in greedy_pkgs \
			if pkg not in discard_pkgs]
2116 def _select_atoms_from_graph(self, *pargs, **kwargs):
2118 Prefer atoms matching packages that have already been
2119 added to the graph or those that are installed and have
2120 not been scheduled for replacement.
2122 kwargs["trees"] = self._dynamic_config._graph_trees
2123 return self._select_atoms_highest_available(*pargs, **kwargs)
	def _select_atoms_highest_available(self, root, depstring,
		myuse=None, parent=None, strict=True, trees=None, priority=None):
		"""This will raise InvalidDependString if necessary. If trees is
		None then self._dynamic_config._filtered_trees is used."""
		# NOTE(review): this chunk appears truncated — the "if trees is
		# None:" guard, the try/finally that protects the state restored
		# after dep_check, the "if not mycheck[0]:" guard before the raise,
		# and the while loop driving node_stack seem to be missing; confirm
		# against upstream before relying on control flow.
		pkgsettings = self._frozen_config.pkgsettings[root]
		trees = self._dynamic_config._filtered_trees
		mytrees = trees[root]
		atom_graph = digraph()
		# Temporarily disable autounmask so that || preferences
		# account for masking and USE settings.
		_autounmask_backup = self._dynamic_config._autounmask
		self._dynamic_config._autounmask = False
		mytrees["pkg_use_enabled"] = self._pkg_use_enabled
		# Stash parent/atom_graph/priority into the trees dict so that
		# dep_check's selection logic can see them.
		if parent is not None:
			trees[root]["parent"] = parent
			trees[root]["atom_graph"] = atom_graph
		if priority is not None:
			trees[root]["priority"] = priority
		mycheck = portage.dep_check(depstring, None,
			pkgsettings, myuse=myuse,
			myroot=root, trees=trees)
		# Restore the state that was stashed above.
		self._dynamic_config._autounmask = _autounmask_backup
		del mytrees["pkg_use_enabled"]
		if parent is not None:
			trees[root].pop("parent")
			trees[root].pop("atom_graph")
		if priority is not None:
			trees[root].pop("priority")
		raise portage.exception.InvalidDependString(mycheck[1])
		selected_atoms = mycheck[1]
		# With a parent, walk the atom graph produced by dep_check in order
		# to map each chosen atom to a Dependency keyed on the node that
		# introduced it.
		chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
		selected_atoms = OrderedDict()
		node_stack = [(parent, None, None)]
		traversed_nodes = set()
		node, node_parent, parent_atom = node_stack.pop()
		traversed_nodes.add(node)
		if node_parent is parent:
			if priority is None:
				node_priority = None
			node_priority = priority.copy()
			# virtuals only have runtime deps
			node_priority = self._priority(runtime=True)
		k = Dependency(atom=parent_atom,
			blocker=parent_atom.blocker, child=node,
			depth=node.depth, parent=node_parent,
			priority=node_priority, root=node.root)
		selected_atoms[k] = child_atoms
		for atom_node in atom_graph.child_nodes(node):
			child_atom = atom_node[0]
			if id(child_atom) not in chosen_atom_ids:
			child_atoms.append(child_atom)
			for child_node in atom_graph.child_nodes(atom_node):
				if child_node in traversed_nodes:
				if not portage.match_from_list(
					child_atom, [child_node]):
					# Typically this means that the atom
					# specifies USE deps that are unsatisfied
					# by the selected package. The caller will
					# record this as an unsatisfied dependency
				child_node.depth = node.depth + 1
				node_stack.append((child_node, node, child_atom))

		return selected_atoms
	def _get_dep_chain(self, start_node, target_atom=None,
		unsatisfied_dependency=False):
		"""
		Returns a list of (atom, node_type) pairs that represent a dep chain.
		If target_atom is None, the first package shown is pkg's parent.
		If target_atom is not None the first package shown is pkg.
		If unsatisfied_dependency is True, the first parent is select who's
		dependency is not satisfied by 'pkg'. This is need for USE changes.
		(Does not support target_atom.)
		"""
		# NOTE(review): this chunk appears truncated — the initialization of
		# dep_chain/node/child/usedep and several branch/continue lines seem
		# to be missing; confirm against upstream before relying on control
		# flow.
		traversed_nodes = set()
		all_parents = self._dynamic_config._parent_atoms

		# When a target_atom is given, first describe start_node itself,
		# annotated with the USE flags that affect the atom.
		if target_atom is not None and isinstance(node, Package):
			affecting_use = set()
			for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
				affecting_use.update(extract_affecting_use(
					node.metadata[dep_str], target_atom))
			# The user cannot change masked/forced flags, so hide them.
			affecting_use.difference_update(node.use.mask, node.use.force)
			pkg_name = _unicode_decode("%s") % (node.cpv,)
			for flag in affecting_use:
				if flag in self._pkg_use_enabled(node):
				usedep.append("-"+flag)
			pkg_name += "[%s]" % ",".join(usedep)

			dep_chain.append((pkg_name, node.type_name))

		# Walk up from start_node toward a root argument, recording each
		# step of the chain.
		while node is not None:
			traversed_nodes.add(node)

			if isinstance(node, DependencyArg):
				if self._dynamic_config.digraph.parent_nodes(node):
				node_type = "argument"
				dep_chain.append((_unicode_decode("%s") % (node,), node_type))

			elif node is not start_node:
				for ppkg, patom in all_parents[child]:
					atom = patom.unevaluated_atom

				# Gather the dep strings that could have pulled in the
				# child, depending on the priority flags of the edge.
				for priority in self._dynamic_config.digraph.nodes[node][0][child]:
					if priority.buildtime:
						dep_strings.add(node.metadata["DEPEND"])
					if priority.runtime:
						dep_strings.add(node.metadata["RDEPEND"])
					if priority.runtime_post:
						dep_strings.add(node.metadata["PDEPEND"])

				affecting_use = set()
				for dep_str in dep_strings:
					affecting_use.update(extract_affecting_use(dep_str, atom))

				#Don't show flags as 'affecting' if the user can't change them,
				affecting_use.difference_update(node.use.mask, \
				pkg_name = _unicode_decode("%s") % (node.cpv,)
				for flag in affecting_use:
					if flag in self._pkg_use_enabled(node):
					usedep.append("-"+flag)
				pkg_name += "[%s]" % ",".join(usedep)

				dep_chain.append((pkg_name, node.type_name))

			if node not in self._dynamic_config.digraph:
				# The parent is not in the graph due to backtracking.

			# When traversing to parents, prefer arguments over packages
			# since arguments are root nodes. Never traverse the same
			# package twice, in order to prevent an infinite loop.
			selected_parent = None
			for parent in self._dynamic_config.digraph.parent_nodes(node):
				if parent in traversed_nodes:
				if isinstance(parent, DependencyArg):
					if self._dynamic_config.digraph.parent_nodes(parent):
						selected_parent = parent
						(_unicode_decode("%s") % (parent,), "argument"))
						selected_parent = None
				if unsatisfied_dependency and node is start_node:
					# Make sure that pkg doesn't satisfy parent's dependency.
					# This ensures that we select the correct parent for use
					for ppkg, atom in all_parents[start_node]:
						atom_set = InternalPackageSet(initial_atoms=(atom,))
						if not atom_set.findAtomForPackage(start_node):
							selected_parent = parent
				selected_parent = parent
			node = selected_parent
2326 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
2327 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2329 for node, node_type in dep_chain:
2330 if node_type == "argument":
2331 display_list.append("required by %s (argument)" % node)
2333 display_list.append("required by %s" % node)
2335 msg = "#" + ", ".join(display_list) + "\n"
	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
		check_backtrack=False):
		"""
		When check_backtrack=True, no output is produced and
		the method either returns or raises _backtrack_mask if
		a matching package has been masked by backtracking.
		"""
		# NOTE(review): this chunk appears heavily truncated — many guard,
		# else:, try/except, continue/return and initializer lines (e.g.
		# missing_use, all_cp, pkg_to_cp, msg, usedep, changes) seem to be
		# missing throughout; confirm against upstream before relying on
		# control flow.
		backtrack_mask = False
		atom_set = InternalPackageSet(initial_atoms=(atom.without_use,))
		xinfo = '"%s"' % atom.unevaluated_atom
		if isinstance(myparent, AtomArg):
			xinfo = _unicode_decode('"%s"') % (myparent,)
			# Discard null/ from failed cpv_expand category expansion.
			xinfo = xinfo.replace("null/", "")
		xinfo = "%s for %s" % (xinfo, root)
		masked_packages = []
		missing_use_adjustable = set()
		required_use_unsatisfied = []
		masked_pkg_instances = set()
		missing_licenses = []
		have_eapi_mask = False
		pkgsettings = self._frozen_config.pkgsettings[root]
		root_config = self._frozen_config.roots[root]
		portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
		vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
		bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
		dbs = self._dynamic_config._filtered_trees[root]["dbs"]
		# Scan every db for candidates and classify why each one cannot be
		# used (masked, backtracked, USE mismatch, REQUIRED_USE, ...).
		for db, pkg_type, built, installed, db_keys in dbs:
			if hasattr(db, "xmatch"):
				cpv_list = db.xmatch("match-all", atom.without_use)
				cpv_list = db.match(atom.without_use)
			for cpv in cpv_list:
				metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, \
					pkg_type, built, installed, db_keys, _pkg_use_enabled=self._pkg_use_enabled)
				if metadata is not None:
					pkg = self._pkg(cpv, pkg_type, root_config,
						installed=installed)
					# pkg.metadata contains calculated USE for ebuilds,
					# required later for getMissingLicenses.
					metadata = pkg.metadata
					if pkg.cp != atom.cp:
						# A cpv can be returned from dbapi.match() as an
						# old-style virtual match even in cases when the
						# package does not actually PROVIDE the virtual.
						# Filter out any such false matches here.
						if not atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
					if pkg in self._dynamic_config._runtime_pkg_mask:
						backtrack_reasons = \
							self._dynamic_config._runtime_pkg_mask[pkg]
						mreasons.append('backtracking: %s' % \
							', '.join(sorted(backtrack_reasons)))
						backtrack_mask = True
					if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
						modified_use=self._pkg_use_enabled(pkg)):
						mreasons = ["exclude option"]
					masked_pkg_instances.add(pkg)
					if atom.unevaluated_atom.use:
						if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
							or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
							missing_use.append(pkg)
						writemsg("violated_conditionals raised " + \
							"InvalidAtom: '%s' parent: %s" % \
							(atom, myparent), noiselevel=-1)
					if not mreasons and \
						pkg.metadata["REQUIRED_USE"] and \
						eapi_has_required_use(pkg.metadata["EAPI"]):
						if not check_required_use(
							pkg.metadata["REQUIRED_USE"],
							self._pkg_use_enabled(pkg),
							pkg.iuse.is_valid_flag):
							required_use_unsatisfied.append(pkg)
					if pkg.built and not mreasons:
						mreasons = ["use flag configuration mismatch"]
				masked_packages.append(
					(root_config, pkgsettings, cpv, metadata, mreasons))
		raise self._backtrack_mask()
		# Build user-facing reasons for USE-related mismatches.
		missing_use_reasons = []
		missing_iuse_reasons = []
		for pkg in missing_use:
			use = self._pkg_use_enabled(pkg)
			#Use the unevaluated atom here, because some flags might have gone
			#lost during evaluation.
			required_flags = atom.unevaluated_atom.use.required
			missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
			mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
			missing_iuse_reasons.append((pkg, mreasons))
			need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
			need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
			untouchable_flags = \
				frozenset(chain(pkg.use.mask, pkg.use.force))
			if untouchable_flags.intersection(
				chain(need_enable, need_disable)):
			missing_use_adjustable.add(pkg)
			required_use = pkg.metadata["REQUIRED_USE"]
			required_use_warning = ""
			old_use = self._pkg_use_enabled(pkg)
			new_use = set(self._pkg_use_enabled(pkg))
			for flag in need_enable:
			for flag in need_disable:
				new_use.discard(flag)
			# Warn when the suggested USE change would itself violate
			# the package's REQUIRED_USE constraints.
			if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
				not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
				required_use_warning = ", this change violates use flag constraints " + \
					"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
			if need_enable or need_disable:
				changes.extend(colorize("red", "+" + x) \
					for x in need_enable)
				changes.extend(colorize("blue", "-" + x) \
					for x in need_disable)
				mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
				missing_use_reasons.append((pkg, mreasons))

			if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
				# Lets see if the violated use deps are conditional.
				# If so, suggest to change them on the parent.
				# If the child package is masked then a change to
				# parent USE is not a valid solution (a normal mask
				# message should be displayed instead).
				if pkg in masked_pkg_instances:
				violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
					pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
				if not (violated_atom.use.enabled or violated_atom.use.disabled):
					#all violated use deps are conditional
					conditional = violated_atom.use.conditional
					involved_flags = set(chain(conditional.equal, conditional.not_equal, \
						conditional.enabled, conditional.disabled))
					untouchable_flags = \
						frozenset(chain(myparent.use.mask, myparent.use.force))
					if untouchable_flags.intersection(involved_flags):
					required_use = myparent.metadata["REQUIRED_USE"]
					required_use_warning = ""
					old_use = self._pkg_use_enabled(myparent)
					new_use = set(self._pkg_use_enabled(myparent))
					for flag in involved_flags:
						new_use.discard(flag)
					if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
						not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
						required_use_warning = ", this change violates use flag constraints " + \
							"defined by %s: '%s'" % (myparent.cpv, \
							human_readable_required_use(required_use))
					for flag in involved_flags:
						if flag in self._pkg_use_enabled(myparent):
							changes.append(colorize("blue", "-" + flag))
						changes.append(colorize("red", "+" + flag))
					mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
					if (myparent, mreasons) not in missing_use_reasons:
						missing_use_reasons.append((myparent, mreasons))

		unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
			in missing_use_reasons if pkg not in masked_pkg_instances]

		unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
			in missing_iuse_reasons if pkg not in masked_pkg_instances]

		show_missing_use = False
		if unmasked_use_reasons:
			# Only show the latest version.
			show_missing_use = []
			parent_reason = None
			for pkg, mreasons in unmasked_use_reasons:
				if parent_reason is None:
					#This happens if a use change on the parent
					#leads to a satisfied conditional use dep.
					parent_reason = (pkg, mreasons)
				elif pkg_reason is None:
					#Don't rely on the first pkg in unmasked_use_reasons,
					#being the highest version of the dependency.
					pkg_reason = (pkg, mreasons)
			show_missing_use.append(pkg_reason)
			show_missing_use.append(parent_reason)

		elif unmasked_iuse_reasons:
			masked_with_iuse = False
			for pkg in masked_pkg_instances:
				#Use atom.unevaluated here, because some flags might have gone
				#lost during evaluation.
				if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
					# Package(s) with required IUSE are masked,
					# so display a normal masking message.
					masked_with_iuse = True
			if not masked_with_iuse:
				show_missing_use = unmasked_iuse_reasons

		if required_use_unsatisfied:
			# If there's a higher unmasked version in missing_use_adjustable
			# then we want to show that instead.
			for pkg in missing_use_adjustable:
				if pkg not in masked_pkg_instances and \
					pkg > required_use_unsatisfied[0]:
					required_use_unsatisfied = False

		if required_use_unsatisfied:
			# We have an unmasked package that only requires USE adjustment
			# in order to satisfy REQUIRED_USE, and nothing more. We assume
			# that the user wants the latest version, so only the first
			# instance is displayed.
			pkg = required_use_unsatisfied[0]
			output_cpv = pkg.cpv
			writemsg_stdout("\n!!! " + \
				colorize("BAD", "The ebuild selected to satisfy ") + \
				colorize("INFORM", xinfo) + \
				colorize("BAD", " has unmet requirements.") + "\n",
			use_display = pkg_use_display(pkg, self._frozen_config.myopts)
			writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
			writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
				"are unsatisfied:\n", noiselevel=-1)
			reduced_noise = check_required_use(
				pkg.metadata["REQUIRED_USE"],
				self._pkg_use_enabled(pkg),
				pkg.iuse.is_valid_flag).tounicode()
			writemsg_stdout(" %s\n" % \
				human_readable_required_use(reduced_noise),
			normalized_required_use = \
				" ".join(pkg.metadata["REQUIRED_USE"].split())
			if reduced_noise != normalized_required_use:
				writemsg_stdout("\n The above constraints " + \
					"are a subset of the following complete expression:\n",
				writemsg_stdout(" %s\n" % \
					human_readable_required_use(normalized_required_use),
			writemsg_stdout("\n", noiselevel=-1)

		elif show_missing_use:
			writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
			writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
			for pkg, mreasons in show_missing_use:
				writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)

		elif masked_packages:
			writemsg_stdout("\n!!! " + \
				colorize("BAD", "All ebuilds that could satisfy ") + \
				colorize("INFORM", xinfo) + \
				colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
			writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
			have_eapi_mask = show_masked_packages(masked_packages)
			writemsg_stdout("\n", noiselevel=-1)
			msg = ("The current version of portage supports " + \
				"EAPI '%s'. You must upgrade to a newer version" + \
				" of portage before EAPI masked packages can" + \
				" be installed.") % portage.const.EAPI
			writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
			writemsg_stdout("\n", noiselevel=-1)

		writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
		# For a command-line atom with no match at all, suggest similarly
		# named packages via difflib.
		if isinstance(myparent, AtomArg):
			cp = myparent.atom.cp
			cat, pkg = portage.catsplit(cp)
			writemsg_stdout("\nemerge: searching for similar names..."
			all_cp.update(vardb.cp_all())
			all_cp.update(portdb.cp_all())
			if "--usepkg" in self._frozen_config.myopts:
				all_cp.update(bindb.cp_all())
			matches = difflib.get_close_matches(cp, all_cp)
			for other_cp in all_cp:
				other_pkg = portage.catsplit(other_cp)[1]
				pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
			pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
			for pkg_match in pkg_matches:
				matches.extend(pkg_to_cp[pkg_match])
			if len(matches) == 1:
				writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
			elif len(matches) > 1:
				"\nemerge: Maybe you meant any of these: %s?\n" % \
				(", ".join(matches),), noiselevel=-1)
				# Generally, this would only happen if
				# all dbapis are empty.
				writemsg_stdout(" nothing similar found.\n"
		if not isinstance(myparent, AtomArg):
			# It's redundant to show parent for AtomArg since
			# it's the same as 'xinfo' displayed above.
			dep_chain = self._get_dep_chain(myparent, atom)
			for node, node_type in dep_chain:
				msg.append('(dependency required by "%s" [%s])' % \
					(colorize('INFORM', _unicode_decode("%s") % \
					(node)), node_type))
		writemsg_stdout("\n".join(msg), noiselevel=-1)
		writemsg_stdout("\n", noiselevel=-1)

		writemsg_stdout("\n", noiselevel=-1)
2706 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
2707 for db, pkg_type, built, installed, db_keys in \
2708 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2709 for pkg in self._iter_match_pkgs(root_config,
2710 pkg_type, atom, onlydeps=onlydeps):
	def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
		"""
		Iterate over Package instances of pkg_type matching the given atom.
		This does not check visibility and it also does not match USE for
		unbuilt ebuilds since USE are lazily calculated after visibility
		checks (to avoid the expense when possible).
		"""
		# NOTE(review): this chunk appears truncated — the else: for the
		# non-xmatch branch, the yield statements, and several break/
		# continue lines seem to be missing; confirm against upstream
		# before relying on control flow.
		db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi

		if hasattr(db, "xmatch"):
			cpv_list = db.xmatch("match-all", atom)
			cpv_list = db.match(atom)

		# USE=multislot can make an installed package appear as if
		# it doesn't satisfy a slot dependency. Rebuilding the ebuild
		# won't do any good as long as USE=multislot is enabled since
		# the newly built package still won't have the expected slot.
		# Therefore, assume that such SLOT dependencies are already
		# satisfied rather than forcing a rebuild.
		installed = pkg_type == 'installed'
		if installed and not cpv_list and atom.slot:
			for cpv in db.match(atom.cp):
				slot_available = False
				# Check whether any db can provide the requested slot
				# for this cpv.
				for other_db, other_type, other_built, \
					other_installed, other_keys in \
					self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
					other_db.aux_get(cpv, ["SLOT"])[0]:
					slot_available = True
				if not slot_available:
				inst_pkg = self._pkg(cpv, "installed",
					root_config, installed=installed)
				# Remove the slot from the atom and verify that
				# the package matches the resulting atom.
				if portage.match_from_list(
					atom.without_slot, [inst_pkg]):

		for cpv in cpv_list:
			pkg = self._pkg(cpv, pkg_type, root_config,
				installed=installed, onlydeps=onlydeps)
			except portage.exception.PackageNotFound:
			if pkg.cp != atom.cp:
				# A cpv can be returned from dbapi.match() as an
				# old-style virtual match even in cases when the
				# package does not actually PROVIDE the virtual.
				# Filter out any such false matches here.
				if not InternalPackageSet(initial_atoms=(atom,)
					).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
		"""
		Cache-aware wrapper around _select_pkg_highest_available_imp:
		results are memoized per (root, atom, onlydeps) key, and selected
		visible packages are injected into _visible_pkgs.
		"""
		# NOTE(review): this chunk appears truncated — the cache-hit branch
		# ("if ret is not None:"), the unpacking of ret, and the final
		# return seem to be missing; confirm against upstream.
		cache_key = (root, atom, onlydeps)
		ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
		if pkg and not existing:
			existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
			if existing and existing == pkg:
				# Update the cache to reflect that the
				# package has been added to the graph.
				self._dynamic_config._highest_pkg_cache[cache_key] = ret
		ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
		self._dynamic_config._highest_pkg_cache[cache_key] = ret
		settings = pkg.root_config.settings
		# Record visible, unmasked selections so later visibility queries
		# can see them.
		if self._pkg_visibility_check(pkg) and \
			not (pkg.installed and pkg.masks):
			self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
	def _want_installed_pkg(self, pkg):
		"""
		Given an installed package returned from select_pkg, return
		True if the user has not explicitly requested for this package
		to be replaced (typically via an atom on the command line).
		"""
		# NOTE(review): this chunk appears truncated — the try: before the
		# _iter_atoms_for_pkg call and the return statements in each branch
		# seem to be missing; confirm against upstream.
		if "selective" not in self._dynamic_config.myparams and \
			pkg.root == self._frozen_config.target_root:
			# If any argument atom matches pkg, the user asked for it
			# explicitly (StopIteration means no argument matched).
			next(self._iter_atoms_for_pkg(pkg))
			except StopIteration:
			except portage.exception.InvalidDependString:
	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
		"""
		Select the highest available package for atom, retrying with
		autounmask-style relaxations (USE changes first, then unstable
		keywords and license changes) when the default selection is not
		usable.
		"""
		# NOTE(review): this chunk appears truncated — several condition
		# lines, the assignment of the retried (pkg, existing) result, and
		# loop-control lines seem to be missing; confirm against upstream.
		pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)

		default_selection = (pkg, existing)

		if self._dynamic_config._autounmask is True:
			if pkg is not None and \
				not self._want_installed_pkg(pkg):
			# First pass allows only USE changes; second pass also allows
			# unstable keywords and license changes.
			for only_use_changes in True, False:
				self._wrapped_select_pkg_highest_available_imp(
					root, atom, onlydeps=onlydeps,
					allow_use_changes=True,
					allow_unstable_keywords=(not only_use_changes),
					allow_license_changes=(not only_use_changes))

				if pkg is not None and \
					not self._want_installed_pkg(pkg):
				if self._dynamic_config._need_restart:

			# This ensures that we can fall back to an installed package
			# that may have been rejected in the autounmask path above.
			return default_selection

		return pkg, existing
	def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False):
		"""
		Check whether pkg may be used, optionally recording the autounmask
		changes (accepting unstable keywords and/or missing licenses) that
		would make a masked package usable.
		"""
		# NOTE(review): this chunk appears truncated — the initial
		# pkg.visible fast path, several return statements, and the "if
		# hint is None:" branch in the reason loop seem to be missing;
		# confirm against upstream before relying on control flow.
		if self._frozen_config.myopts.get('--autounmask', 'n') is not True:

		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
		root_config = self._frozen_config.roots[pkg.root]
		mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))

		masked_by_unstable_keywords = False
		missing_licenses = None
		masked_by_something_else = False

		# Classify each masking reason by its unmask hint.
		for reason in mreasons:
			hint = reason.unmask_hint
			masked_by_something_else = True
			elif hint.key == "unstable keyword":
				masked_by_unstable_keywords = True
			elif hint.key == "license":
				missing_licenses = hint.value
				masked_by_something_else = True

		if masked_by_something_else:

		if pkg in self._dynamic_config._needed_unstable_keywords:
			#If the package is already keyworded, remove the mask.
			masked_by_unstable_keywords = False

		if missing_licenses:
			#If the needed licenses are already unmasked, remove the mask.
			missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))

		if not (masked_by_unstable_keywords or missing_licenses):
			#Package has already been unmasked.

		if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
			(missing_licenses and not allow_license_changes):
			#We are not allowed to do the needed changes.

		if masked_by_unstable_keywords:
			self._dynamic_config._needed_unstable_keywords.add(pkg)

		if missing_licenses:
			self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
	def _pkg_use_enabled(self, pkg, target_use=None):
		"""
		If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
		If target_use is given, the need changes are computed to make the package useable.
		Example: target_use = { "foo": True, "bar": False }
		The flags target_use must be in the pkg's IUSE.
		"""
		# NOTE(review): this chunk appears truncated — the else: branches in
		# the target_use loop, the initialization of new_use/new_changes in
		# the no-previous-change path, several return statements, and parts
		# of want_restart_for_use_change seem to be missing; confirm
		# against upstream before relying on control flow.
		needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)

		if target_use is None:
			if needed_use_config_change is None:
				return pkg.use.enabled
			return needed_use_config_change[0]

		if needed_use_config_change is not None:
			old_use = needed_use_config_change[0]
			old_changes = needed_use_config_change[1]
			new_changes = old_changes.copy()
			old_use = pkg.use.enabled

		# Translate target_use into enable/disable change records.
		for flag, state in target_use.items():
			if flag not in old_use:
				if new_changes.get(flag) == False:
				new_changes[flag] = True
				if new_changes.get(flag) == True:
				new_changes[flag] = False
		new_use.update(old_use.difference(target_use))

		def want_restart_for_use_change(pkg, new_use):
			# A restart is only worthwhile when the USE change alters the
			# computed dependencies of a package already in the graph.
			if pkg not in self._dynamic_config.digraph.nodes:

			for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
				dep = pkg.metadata[key]
				old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
				new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))

				if old_val != new_val:

			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:

			new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
			for ppkg, atom in parent_atoms:
				if not atom.use or \
					not atom.use.required.intersection(changes):

		if new_changes != old_changes:
			#Don't do the change if it violates REQUIRED_USE.
			required_use = pkg.metadata["REQUIRED_USE"]
			if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
				not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):

			self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
			if want_restart_for_use_change(pkg, new_use):
				self._dynamic_config._need_restart = True
# NOTE(review): elided listing — embedded line numbers are non-contiguous,
# so several branches (continue/break/else lines, try-openers) are missing
# from this view; verify against the full file before editing.
# Purpose: core package-selection routine — scan the filtered dbs (binary,
# installed, ebuild trees) for the highest acceptable match of `atom` in
# `root`, honoring visibility, USE deps, --newuse/--noreplace/--usepkg*
# options, and existing graph nodes. Returns (pkg, existing_node).
2992 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
2993 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False):
2994 root_config = self._frozen_config.roots[root]
2995 pkgsettings = self._frozen_config.pkgsettings[root]
2996 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2997 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2998 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2999 # List of acceptable packages, ordered by type preference.
3000 matched_packages = []
3001 matched_pkgs_ignore_use = []
3002 highest_version = None
3003 if not isinstance(atom, portage.dep.Atom):
3004 atom = portage.dep.Atom(atom)
3006 atom_set = InternalPackageSet(initial_atoms=(atom,))
3007 existing_node = None
# Snapshot relevant command-line options / depgraph params once, up front.
3009 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3010 usepkg = "--usepkg" in self._frozen_config.myopts
3011 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3012 empty = "empty" in self._dynamic_config.myparams
3013 selective = "selective" in self._dynamic_config.myparams
3015 noreplace = "--noreplace" in self._frozen_config.myopts
3016 avoid_update = "--update" not in self._frozen_config.myopts
3017 dont_miss_updates = "--update" in self._frozen_config.myopts
3018 use_ebuild_visibility = self._frozen_config.myopts.get(
3019 '--use-ebuild-visibility', 'n') != 'n'
3020 # Behavior of the "selective" parameter depends on
3021 # whether or not a package matches an argument atom.
3022 # If an installed package provides an old-style
3023 # virtual that is no longer provided by an available
3024 # package, the installed package may match an argument
3025 # atom even though none of the available packages do.
3026 # Therefore, "selective" logic does not consider
3027 # whether or not an installed package matches an
3028 # argument atom. It only considers whether or not
3029 # available packages match argument atoms, which is
3030 # represented by the found_available_arg flag.
3031 found_available_arg = False
3032 packages_with_invalid_use_config = []
# Two passes: first prefer a package already in the graph for this slot
# (find_existing_node=True), then fall back to a fresh selection.
3033 for find_existing_node in True, False:
3036 for db, pkg_type, built, installed, db_keys in dbs:
3039 if installed and not find_existing_node:
3040 want_reinstall = reinstall or empty or \
3041 (found_available_arg and not selective)
3042 if want_reinstall and matched_packages:
3045 # Ignore USE deps for the initial match since we want to
3046 # ensure that updates aren't missed solely due to the user's
3047 # USE configuration.
3048 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3050 if pkg in self._dynamic_config._runtime_pkg_mask:
3051 # The package has been masked by the backtracking logic
3054 if not pkg.installed and \
3055 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3056 modified_use=self._pkg_use_enabled(pkg)):
3059 if packages_with_invalid_use_config and \
3060 (not pkg.installed or dont_miss_updates):
3061 # Check if a higher version was rejected due to user
3062 # USE configuration. The packages_with_invalid_use_config
3063 # list only contains unbuilt ebuilds since USE can't
3064 # be changed for built packages.
3065 higher_version_rejected = False
3066 for rejected in packages_with_invalid_use_config:
3067 if rejected.cp != pkg.cp:
3070 higher_version_rejected = True
3072 if higher_version_rejected:
3076 # Make --noreplace take precedence over --newuse.
3077 if not pkg.installed and noreplace and \
3078 cpv in vardb.match(atom):
3079 inst_pkg = self._pkg(pkg.cpv, "installed",
3080 root_config, installed=True)
3081 if inst_pkg.visible:
3082 # If the installed version is masked, it may
3083 # be necessary to look at lower versions,
3084 # in case there is a visible downgrade.
3086 reinstall_for_flags = None
# Visibility filtering; see the NOTE below re bug #351828 consistency.
3088 if not pkg.installed or \
3089 (matched_packages and not avoid_update):
3090 # Only enforce visibility on installed packages
3091 # if there is at least one other visible package
3092 # available. By filtering installed masked packages
3093 # here, packages that have been masked since they
3094 # were installed can be automatically downgraded
3095 # to an unmasked version. NOTE: This code needs to
3096 # be consistent with masking behavior inside
3097 # _dep_check_composite_db, in order to prevent
3098 # incorrect choices in || deps like bug #351828.
3100 if not self._pkg_visibility_check(pkg, \
3101 allow_unstable_keywords=allow_unstable_keywords,
3102 allow_license_changes=allow_license_changes):
3105 # Enable upgrade or downgrade to a version
3106 # with visible KEYWORDS when the installed
3107 # version is masked by KEYWORDS, but never
3108 # reinstall the same exact version only due
3109 # to a KEYWORDS mask. See bug #252167.
3111 if pkg.type_name != "ebuild" and matched_packages:
3112 # Don't re-install a binary package that is
3113 # identical to the currently installed package
3114 # (see bug #354441).
3115 identical_binary = False
3116 if usepkg and pkg.installed:
3117 for selected_pkg in matched_packages:
3118 if selected_pkg.type_name == "binary" and \
3119 selected_pkg.cpv == pkg.cpv and \
3120 selected_pkg.metadata.get('BUILD_TIME') == \
3121 pkg.metadata.get('BUILD_TIME'):
3122 identical_binary = True
3125 if not identical_binary:
3126 # If the ebuild no longer exists or it's
3127 # keywords have been dropped, reject built
3128 # instances (installed or binary).
3129 # If --usepkgonly is enabled, assume that
3130 # the ebuild status should be ignored.
3131 if not use_ebuild_visibility and usepkgonly:
3132 if pkg.installed and pkg.masks:
3137 pkg.cpv, "ebuild", root_config)
3138 except portage.exception.PackageNotFound:
3141 if not self._pkg_visibility_check(pkg_eb, \
3142 allow_unstable_keywords=allow_unstable_keywords,
3143 allow_license_changes=allow_license_changes):
3146 # Calculation of USE for unbuilt ebuilds is relatively
3147 # expensive, so it is only performed lazily, after the
3148 # above visibility checks are complete.
3151 if root == self._frozen_config.target_root:
3153 myarg = next(self._iter_atoms_for_pkg(pkg))
3154 except StopIteration:
3156 except portage.exception.InvalidDependString:
3158 # masked by corruption
3160 if not installed and myarg:
3161 found_available_arg = True
# USE-dependency handling: check IUSE coverage, optionally request the
# needed USE changes via _pkg_use_enabled(pkg, target_use), and classify
# unbuilt ebuilds whose user USE config conflicts with the atom.
3163 if atom.unevaluated_atom.use:
3164 #Make sure we don't miss a 'missing IUSE'.
3165 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3166 # Don't add this to packages_with_invalid_use_config
3167 # since IUSE cannot be adjusted by the user.
3172 matched_pkgs_ignore_use.append(pkg)
3173 if allow_use_changes:
3175 for flag in atom.use.enabled:
3176 target_use[flag] = True
3177 for flag in atom.use.disabled:
3178 target_use[flag] = False
3179 use = self._pkg_use_enabled(pkg, target_use)
3181 use = self._pkg_use_enabled(pkg)
3184 can_adjust_use = not pkg.built
3185 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3186 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3188 if atom.use.enabled:
3189 if atom.use.enabled.intersection(missing_disabled):
3191 can_adjust_use = False
3192 need_enabled = atom.use.enabled.difference(use)
3194 need_enabled = need_enabled.difference(missing_enabled)
3198 if pkg.use.mask.intersection(need_enabled):
3199 can_adjust_use = False
3201 if atom.use.disabled:
3202 if atom.use.disabled.intersection(missing_enabled):
3204 can_adjust_use = False
3205 need_disabled = atom.use.disabled.intersection(use)
3207 need_disabled = need_disabled.difference(missing_disabled)
3211 if pkg.use.force.difference(
3212 pkg.use.mask).intersection(need_disabled):
3213 can_adjust_use = False
3217 # Above we must ensure that this package has
3218 # absolutely no use.force, use.mask, or IUSE
3219 # issues that the user typically can't make
3220 # adjustments to solve (see bug #345979).
3221 # FIXME: Conditional USE deps complicate
3222 # issues. This code currently excludes cases
3223 # in which the user can adjust the parent
3224 # package's USE in order to satisfy the dep.
3225 packages_with_invalid_use_config.append(pkg)
3228 if pkg.cp == atom_cp:
3229 if highest_version is None:
3230 highest_version = pkg
3231 elif pkg > highest_version:
3232 highest_version = pkg
3233 # At this point, we've found the highest visible
3234 # match from the current repo. Any lower versions
3235 # from this repo are ignored, so this so the loop
3236 # will always end with a break statement below
3238 if find_existing_node:
3239 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3242 # Use PackageSet.findAtomForPackage()
3243 # for PROVIDE support.
3244 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3245 if highest_version and \
3246 e_pkg.cp == atom_cp and \
3247 e_pkg < highest_version and \
3248 e_pkg.slot_atom != highest_version.slot_atom:
3249 # There is a higher version available in a
3250 # different slot, so this existing node is
3254 matched_packages.append(e_pkg)
3255 existing_node = e_pkg
3257 # Compare built package to current config and
3258 # reject the built package if necessary.
3259 if built and (not installed or matched_pkgs_ignore_use) and \
3260 ("--newuse" in self._frozen_config.myopts or \
3261 "--reinstall" in self._frozen_config.myopts or \
3262 "--binpkg-respect-use" in self._frozen_config.myopts):
3263 iuses = pkg.iuse.all
3264 old_use = self._pkg_use_enabled(pkg)
3266 pkgsettings.setcpv(myeb)
3268 pkgsettings.setcpv(pkg)
3269 now_use = pkgsettings["PORTAGE_USE"].split()
3270 forced_flags = set()
3271 forced_flags.update(pkgsettings.useforce)
3272 forced_flags.update(pkgsettings.usemask)
3274 if myeb and not usepkgonly:
3275 cur_iuse = myeb.iuse.all
3276 if self._reinstall_for_flags(forced_flags,
3280 # Compare current config to installed package
3281 # and do not reinstall if possible.
3282 if not installed and \
3283 ("--newuse" in self._frozen_config.myopts or \
3284 "--reinstall" in self._frozen_config.myopts) and \
3285 cpv in vardb.match(atom):
3286 forced_flags = set()
3287 forced_flags.update(pkg.use.force)
3288 forced_flags.update(pkg.use.mask)
3289 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3290 old_use = inst_pkg.use.enabled
3291 old_iuse = inst_pkg.iuse.all
3292 cur_use = self._pkg_use_enabled(pkg)
3293 cur_iuse = pkg.iuse.all
3294 reinstall_for_flags = \
3295 self._reinstall_for_flags(
3296 forced_flags, old_use, old_iuse,
3298 if reinstall_for_flags:
3302 matched_packages.append(pkg)
3303 if reinstall_for_flags:
3304 self._dynamic_config._reinstall_nodes[pkg] = \
# --- Post-scan winner selection ---
3308 if not matched_packages:
3311 if "--debug" in self._frozen_config.myopts:
3312 for pkg in matched_packages:
3313 portage.writemsg("%s %s\n" % \
3314 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3316 # Filter out any old-style virtual matches if they are
3317 # mixed with new-style virtual matches.
3319 if len(matched_packages) > 1 and \
3320 "virtual" == portage.catsplit(cp)[0]:
3321 for pkg in matched_packages:
3324 # Got a new-style virtual, so filter
3325 # out any old-style virtuals.
3326 matched_packages = [pkg for pkg in matched_packages \
3330 if existing_node is not None and \
3331 existing_node in matched_packages:
3332 return existing_node, existing_node
3334 if len(matched_packages) > 1:
3335 if rebuilt_binaries:
3338 for pkg in matched_packages:
3343 if built_pkg is not None and inst_pkg is not None:
3344 # Only reinstall if binary package BUILD_TIME is
3345 # non-empty, in order to avoid cases like to
3346 # bug #306659 where BUILD_TIME fields are missing
3347 # in local and/or remote Packages file.
3349 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3350 except (KeyError, ValueError):
3354 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3355 except (KeyError, ValueError):
3356 installed_timestamp = 0
3358 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3359 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3360 if built_timestamp and \
3361 built_timestamp > installed_timestamp and \
3362 built_timestamp >= minimal_timestamp:
3363 return built_pkg, existing_node
3365 #Don't care if the binary has an older BUILD_TIME than the installed
3366 #package. This is for closely tracking a binhost.
3367 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3369 if built_timestamp and \
3370 built_timestamp != installed_timestamp:
3371 return built_pkg, existing_node
# Drop invalid installed entries, prefer a visible installed match, then
# fall back to the best (highest) cpv among the remaining candidates.
3373 for pkg in matched_packages:
3374 if pkg.installed and pkg.invalid:
3375 matched_packages = [x for x in \
3376 matched_packages if x is not pkg]
3379 for pkg in matched_packages:
3380 if pkg.installed and self._pkg_visibility_check(pkg, \
3381 allow_unstable_keywords=allow_unstable_keywords,
3382 allow_license_changes=allow_license_changes):
3383 return pkg, existing_node
3385 bestmatch = portage.best(
3386 [pkg.cpv for pkg in matched_packages \
3387 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3388 allow_license_changes=allow_license_changes)])
3390 # all are masked, so ignore visibility
3391 bestmatch = portage.best(
3392 [pkg.cpv for pkg in matched_packages])
3393 matched_packages = [pkg for pkg in matched_packages \
3394 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3396 # ordered by type preference ("ebuild" type is the last resort)
3397 return matched_packages[-1], existing_node
# Purpose: restricted selector used in complete-graph mode — only consider
# packages already present in the graph trees, never pull in new ones.
# NOTE(review): elided listing — the no-match guard (presumably
# `if not matches: return None, None`) is not visible here; confirm
# against the full file.
3399 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3401 Select packages that have already been added to the graph or
3402 those that are installed and have not been scheduled for
3405 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3406 matches = graph_db.match_pkgs(atom)
3409 pkg = matches[-1] # highest match
3410 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3411 return pkg, in_graph
# Purpose: selector used for removal ("remove" param) operations — match
# only installed packages from the fake vartree. When several versions
# match, prefer the unmasked one(s) before taking the highest match.
# NOTE(review): elided listing — the no-match guard and the
# `matches = unmasked` assignments are not visible here; confirm against
# the full file.
3413 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
3415 Select packages that are installed.
3417 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3418 matches = vardb.match_pkgs(atom)
3421 if len(matches) > 1:
3422 unmasked = [pkg for pkg in matches if \
3423 self._pkg_visibility_check(pkg)]
3425 if len(unmasked) == 1:
3428 # Account for packages with masks (like KEYWORDS masks)
3429 # that are usually ignored in visibility checks for
3430 # installed packages, in order to handle cases like
3432 unmasked = [pkg for pkg in matches if not pkg.masks]
3435 pkg = matches[-1] # highest match
3436 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3437 return pkg, in_graph
# NOTE(review): elided listing — several return/continue/else lines are
# missing from this view (e.g. the early returns after the option checks);
# confirm control flow against the full file before editing.
3439 def _complete_graph(self, required_sets=None):
3441 Add any deep dependencies of required sets (args, system, world) that
3442 have not been pulled into the graph yet. This ensures that the graph
3443 is consistent such that initially satisfied deep dependencies are not
3444 broken in the new graph. Initially unsatisfied dependencies are
3445 irrelevant since we only want to avoid breaking dependencies that are
3446 initially satisfied.
3448 Since this method can consume enough time to disturb users, it is
3449 currently only enabled by the --complete-graph option.
3451 @param required_sets: contains required sets (currently only used
3452 for depclean and prune removal operations)
3453 @type required_sets: dict
# Bail out early for --buildpkgonly or when recursion is disabled.
3455 if "--buildpkgonly" in self._frozen_config.myopts or \
3456 "recurse" not in self._dynamic_config.myparams:
3459 if "complete" not in self._dynamic_config.myparams:
3460 # Automatically enable complete mode if there are any
3461 # downgrades, since they often break dependencies
3462 # (like in bug #353613).
3463 have_downgrade = False
3464 for node in self._dynamic_config.digraph:
3465 if not isinstance(node, Package) or \
3466 node.operation != "merge":
3468 vardb = self._frozen_config.roots[
3469 node.root].trees["vartree"].dbapi
3470 inst_pkg = vardb.match_pkgs(node.slot_atom)
3471 if inst_pkg and inst_pkg[0] > node:
3472 have_downgrade = True
3476 self._dynamic_config.myparams["complete"] = True
3478 # Skip complete graph mode, in order to avoid consuming
3479 # enough time to disturb users.
3484 # Put the depgraph into a mode that causes it to only
3485 # select packages that have already been added to the
3486 # graph or those that are installed and have not been
3487 # scheduled for replacement. Also, toggle the "deep"
3488 # parameter so that all dependencies are traversed and
3490 self._select_atoms = self._select_atoms_from_graph
3491 if "remove" in self._dynamic_config.myparams:
3492 self._select_package = self._select_pkg_from_installed
3494 self._select_package = self._select_pkg_from_graph
3495 already_deep = self._dynamic_config.myparams.get("deep") is True
3496 if not already_deep:
3497 self._dynamic_config.myparams["deep"] = True
3499 # Invalidate the package selection cache, since
3500 # _select_package has just changed implementations.
3501 for trees in self._dynamic_config._filtered_trees.values():
3502 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets (world/system/args) as dependency roots so
# their deep deps get traversed with the restricted selectors above.
3504 args = self._dynamic_config._initial_arg_list[:]
3505 for root in self._frozen_config.roots:
3506 if root != self._frozen_config.target_root and \
3507 "remove" in self._dynamic_config.myparams:
3508 # Only pull in deps for the relevant root.
3510 depgraph_sets = self._dynamic_config.sets[root]
3511 required_set_names = self._frozen_config._required_set_names.copy()
3512 remaining_args = required_set_names.copy()
3513 if required_sets is None or root not in required_sets:
3516 # Removal actions may override sets with temporary
3517 # replacements that have had atoms removed in order
3518 # to implement --deselect behavior.
3519 required_set_names = set(required_sets[root])
3520 depgraph_sets.sets.clear()
3521 depgraph_sets.sets.update(required_sets[root])
3522 if "remove" not in self._dynamic_config.myparams and \
3523 root == self._frozen_config.target_root and \
3524 (already_deep or "empty" in self._dynamic_config.myparams):
3525 remaining_args.difference_update(depgraph_sets.sets)
3526 if not remaining_args and \
3527 not self._dynamic_config._ignored_deps and \
3528 not self._dynamic_config._dep_stack:
3530 root_config = self._frozen_config.roots[root]
3531 for s in required_set_names:
3532 pset = depgraph_sets.sets.get(s)
3534 pset = root_config.sets[s]
3535 atom = SETPREFIX + s
3536 args.append(SetArg(arg=atom, pset=pset,
3537 root_config=root_config))
3539 self._set_args(args)
3540 for arg in self._expand_set_args(args, add_to_digraph=True):
3541 for atom in arg.pset.getAtoms():
3542 self._dynamic_config._dep_stack.append(
3543 Dependency(atom=atom, root=arg.root_config.root,
3547 if self._dynamic_config._ignored_deps:
3548 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
3549 self._dynamic_config._ignored_deps = []
3550 if not self._create_graph(allow_unsatisfied=True):
3552 # Check the unsatisfied deps to see if any initially satisfied deps
3553 # will become unsatisfied due to an upgrade. Initially unsatisfied
3554 # deps are irrelevant since we only want to avoid breaking deps
3555 # that are initially satisfied.
3556 while self._dynamic_config._unsatisfied_deps:
3557 dep = self._dynamic_config._unsatisfied_deps.pop()
3558 vardb = self._frozen_config.roots[
3559 dep.root].trees["vartree"].dbapi
3560 matches = vardb.match_pkgs(dep.atom)
3562 self._dynamic_config._initially_unsatisfied_deps.append(dep)
3564 # An scheduled installation broke a deep dependency.
3565 # Add the installed package to the graph so that it
3566 # will be appropriately reported as a slot collision
3567 # (possibly solvable via backtracking).
3568 pkg = matches[-1] # highest match
3569 if not self._add_pkg(pkg, dep):
3571 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): elided listing — the `try:` opener around aux_get and the
# final `return pkg` are not visible here; confirm against the full file.
# Purpose: memoized Package factory — look up (type_name, root, cpv,
# operation) in the frozen-config cache, otherwise build a Package from
# the appropriate tree's dbapi metadata and cache it.
3575 def _pkg(self, cpv, type_name, root_config, installed=False,
3578 Get a package instance from the cache, or create a new
3579 one if necessary. Raises PackageNotFound from aux_get if it
3580 failures for some reason (package does not exist or is
# Installed / onlydeps packages are never merged, hence "nomerge".
3584 if installed or onlydeps:
3585 operation = "nomerge"
3586 # Ensure that we use the specially optimized RootConfig instance
3587 # that refers to FakeVartree instead of the real vartree.
3588 root_config = self._frozen_config.roots[root_config.root]
3589 pkg = self._frozen_config._pkg_cache.get(
3590 (type_name, root_config.root, cpv, operation))
3591 if pkg is None and onlydeps and not installed:
3592 # Maybe it already got pulled in as a "merge" node.
3593 pkg = self._dynamic_config.mydbapi[root_config.root].get(
3594 (type_name, root_config.root, cpv, 'merge'))
# Cache miss: fetch metadata from the real (orig) tree's dbapi and build
# a new Package instance.
3597 tree_type = self.pkg_tree_map[type_name]
3598 db = root_config.trees[tree_type].dbapi
3599 db_keys = list(self._frozen_config._trees_orig[root_config.root][
3600 tree_type].dbapi._aux_cache_keys)
3602 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
3604 raise portage.exception.PackageNotFound(cpv)
3605 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
3606 installed=installed, metadata=metadata, onlydeps=onlydeps,
3607 root_config=root_config, type_name=type_name)
3608 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked *only* by LICENSE,
# for later unmask suggestions.
3610 if not self._pkg_visibility_check(pkg) and \
3611 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
3612 slot_key = (pkg.root, pkg.slot_atom)
3613 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
3614 if other_pkg is None or pkg > other_pkg:
3615 self._frozen_config._highest_license_masked[slot_key] = pkg
# NOTE(review): elided listing — embedded line numbers are non-contiguous;
# several try-openers, else branches and continue/return lines are missing
# from this view. Confirm control flow against the full file before editing.
3619 def _validate_blockers(self):
3620 """Remove any blockers from the digraph that do not match any of the
3621 packages within the graph. If necessary, create hard deps to ensure
3622 correct merge order such that mutually blocking packages are never
3623 installed simultaneously."""
3625 if "--buildpkgonly" in self._frozen_config.myopts or \
3626 "--nodeps" in self._frozen_config.myopts:
3629 complete = "complete" in self._dynamic_config.myparams
3630 deep = "deep" in self._dynamic_config.myparams
# --- Phase 1: collect blocker atoms from installed packages, using a
# persistent BlockerCache keyed by COUNTER to avoid re-running dep_check.
3633 # Pull in blockers from all installed packages that haven't already
3634 # been pulled into the depgraph. This is not enabled by default
3635 # due to the performance penalty that is incurred by all the
3636 # additional dep_check calls that are required.
3638 # For installed packages, always ignore blockers from DEPEND since
3639 # only runtime dependencies should be relevant for packages that
3640 # are already built.
3641 dep_keys = ["RDEPEND", "PDEPEND"]
3642 for myroot in self._frozen_config.trees:
3643 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
3644 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
3645 pkgsettings = self._frozen_config.pkgsettings[myroot]
3646 root_config = self._frozen_config.roots[myroot]
3647 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
3648 final_db = self._dynamic_config.mydbapi[myroot]
3650 blocker_cache = BlockerCache(myroot, vardb)
3651 stale_cache = set(blocker_cache)
3654 stale_cache.discard(cpv)
3655 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
3657 pkg in self._dynamic_config._traversed_pkg_deps
3659 # Check for masked installed packages. Only warn about
3660 # packages that are in the graph in order to avoid warning
3661 # about those that will be automatically uninstalled during
3662 # the merge process or by --depclean. Always warn about
3663 # packages masked by license, since the user likely wants
3664 # to adjust ACCEPT_LICENSE.
3666 if not self._pkg_visibility_check(pkg) and \
3667 (pkg_in_graph or 'LICENSE' in pkg.masks):
3668 self._dynamic_config._masked_installed.add(pkg)
3670 self._check_masks(pkg)
3672 blocker_atoms = None
3678 self._dynamic_config._blocker_parents.child_nodes(pkg))
3683 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
3687 # Select just the runtime blockers.
3688 blockers = [blocker for blocker in blockers \
3689 if blocker.priority.runtime or \
3690 blocker.priority.runtime_post]
3691 if blockers is not None:
3692 blockers = set(blocker.atom for blocker in blockers)
3694 # If this node has any blockers, create a "nomerge"
3695 # node for it so that they can be enforced.
3696 self._spinner_update()
3697 blocker_data = blocker_cache.get(cpv)
# Cached data is invalid if COUNTER changed (package was re-merged).
3698 if blocker_data is not None and \
3699 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3702 # If blocker data from the graph is available, use
3703 # it to validate the cache and update the cache if
3705 if blocker_data is not None and \
3706 blockers is not None:
3707 if not blockers.symmetric_difference(
3708 blocker_data.atoms):
3712 if blocker_data is None and \
3713 blockers is not None:
3714 # Re-use the blockers from the graph.
3715 blocker_atoms = sorted(blockers)
3716 counter = long(pkg.metadata["COUNTER"])
3718 blocker_cache.BlockerData(counter, blocker_atoms)
3719 blocker_cache[pkg.cpv] = blocker_data
3723 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
3725 # Use aux_get() to trigger FakeVartree global
3726 # updates on *DEPEND when appropriate.
3727 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3728 # It is crucial to pass in final_db here in order to
3729 # optimize dep_check calls by eliminating atoms via
3730 # dep_wordreduce and dep_eval calls.
3732 success, atoms = portage.dep_check(depstr,
3733 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
3734 trees=self._dynamic_config._graph_trees, myroot=myroot)
3737 except Exception as e:
3738 # This is helpful, for example, if a ValueError
3739 # is thrown from cpv_expand due to multiple
3740 # matches (this can happen if an atom lacks a
3742 show_invalid_depstring_notice(
3743 pkg, depstr, str(e))
3747 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3748 if replacement_pkg and \
3749 replacement_pkg[0].operation == "merge":
3750 # This package is being replaced anyway, so
3751 # ignore invalid dependencies so as not to
3752 # annoy the user too much (otherwise they'd be
3753 # forced to manually unmerge it first).
3755 show_invalid_depstring_notice(pkg, depstr, atoms)
3757 blocker_atoms = [myatom for myatom in atoms \
3759 blocker_atoms.sort()
3760 counter = long(pkg.metadata["COUNTER"])
3761 blocker_cache[cpv] = \
3762 blocker_cache.BlockerData(counter, blocker_atoms)
3765 for atom in blocker_atoms:
3766 blocker = Blocker(atom=atom,
3767 eapi=pkg.metadata["EAPI"],
3768 priority=self._priority(runtime=True),
3770 self._dynamic_config._blocker_parents.add(blocker, pkg)
3771 except portage.exception.InvalidAtom as e:
3772 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3773 show_invalid_depstring_notice(
3774 pkg, depstr, "Invalid Atom: %s" % (e,))
# Evict cache entries for packages no longer installed, then persist.
3776 for cpv in stale_cache:
3777 del blocker_cache[cpv]
3778 blocker_cache.flush()
3781 # Discard any "uninstall" tasks scheduled by previous calls
3782 # to this method, since those tasks may not make sense given
3783 # the current graph state.
3784 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
3785 if previous_uninstall_tasks:
3786 self._dynamic_config._blocker_uninstalls = digraph()
3787 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# --- Phase 2: resolve each blocker against the initial (installed) and
# final (post-merge) package sets; schedule uninstalls or mark unsolvable.
3789 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
3790 self._spinner_update()
3791 root_config = self._frozen_config.roots[blocker.root]
3792 virtuals = root_config.settings.getvirtuals()
3793 myroot = blocker.root
3794 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
3795 final_db = self._dynamic_config.mydbapi[myroot]
3797 provider_virtual = False
3798 if blocker.cp in virtuals and \
3799 not self._have_new_virt(blocker.root, blocker.cp):
3800 provider_virtual = True
3802 # Use this to check PROVIDE for each matched package
3804 atom_set = InternalPackageSet(
3805 initial_atoms=[blocker.atom])
# Old-style virtual: expand the blocker atom across all providers.
3807 if provider_virtual:
3809 for provider_entry in virtuals[blocker.cp]:
3810 atoms.append(Atom(blocker.atom.replace(
3811 blocker.cp, provider_entry.cp, 1)))
3813 atoms = [blocker.atom]
3815 blocked_initial = set()
3817 for pkg in initial_db.match_pkgs(atom):
3818 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3819 blocked_initial.add(pkg)
3821 blocked_final = set()
3823 for pkg in final_db.match_pkgs(atom):
3824 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3825 blocked_final.add(pkg)
3827 if not blocked_initial and not blocked_final:
3828 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
3829 self._dynamic_config._blocker_parents.remove(blocker)
3830 # Discard any parents that don't have any more blockers.
3831 for pkg in parent_pkgs:
3832 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3833 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3834 self._dynamic_config._blocker_parents.remove(pkg)
3836 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3837 unresolved_blocks = False
3838 depends_on_order = set()
3839 for pkg in blocked_initial:
3840 if pkg.slot_atom == parent.slot_atom and \
3841 not blocker.atom.blocker.overlap.forbid:
3842 # New !!atom blockers do not allow temporary
3843 # simulaneous installation, so unlike !atom
3844 # blockers, !!atom blockers aren't ignored
3845 # when they match other packages occupying
3848 if parent.installed:
3849 # Two currently installed packages conflict with
3850 # eachother. Ignore this case since the damage
3851 # is already done and this would be likely to
3852 # confuse users if displayed like a normal blocker.
3855 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3857 if parent.operation == "merge":
3858 # Maybe the blocked package can be replaced or simply
3859 # unmerged to resolve this block.
3860 depends_on_order.add((pkg, parent))
3862 # None of the above blocker resolutions techniques apply,
3863 # so apparently this one is unresolvable.
3864 unresolved_blocks = True
3865 for pkg in blocked_final:
3866 if pkg.slot_atom == parent.slot_atom and \
3867 not blocker.atom.blocker.overlap.forbid:
3868 # New !!atom blockers do not allow temporary
3869 # simulaneous installation, so unlike !atom
3870 # blockers, !!atom blockers aren't ignored
3871 # when they match other packages occupying
3874 if parent.operation == "nomerge" and \
3875 pkg.operation == "nomerge":
3876 # This blocker will be handled the next time that a
3877 # merge of either package is triggered.
3880 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3882 # Maybe the blocking package can be
3883 # unmerged to resolve this block.
3884 if parent.operation == "merge" and pkg.installed:
3885 depends_on_order.add((pkg, parent))
3887 elif parent.operation == "nomerge":
3888 depends_on_order.add((parent, pkg))
3890 # None of the above blocker resolutions techniques apply,
3891 # so apparently this one is unresolvable.
3892 unresolved_blocks = True
3894 # Make sure we don't unmerge any package that have been pulled
3896 if not unresolved_blocks and depends_on_order:
3897 for inst_pkg, inst_task in depends_on_order:
3898 if self._dynamic_config.digraph.contains(inst_pkg) and \
3899 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3900 unresolved_blocks = True
3903 if not unresolved_blocks and depends_on_order:
3904 for inst_pkg, inst_task in depends_on_order:
3905 uninst_task = Package(built=inst_pkg.built,
3906 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3907 metadata=inst_pkg.metadata,
3908 operation="uninstall",
3909 root_config=inst_pkg.root_config,
3910 type_name=inst_pkg.type_name)
3911 # Enforce correct merge order with a hard dep.
3912 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3913 priority=BlockerDepPriority.instance)
3914 # Count references to this blocker so that it can be
3915 # invalidated after nodes referencing it have been
3917 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3918 if not unresolved_blocks and not depends_on_order:
3919 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3920 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3921 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3922 self._dynamic_config._blocker_parents.remove(blocker)
3923 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3924 self._dynamic_config._blocker_parents.remove(parent)
3925 if unresolved_blocks:
3926 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
	def _accept_blocker_conflicts(self):
		"""
		Decide whether unresolved blocker conflicts are acceptable.
		Options that prevent packages from actually being merged onto
		the live system (--buildpkgonly, --fetchonly, --fetch-all-uri,
		--nodeps) make blocker conflicts harmless, so they are tolerated
		in those modes.  (The accumulator/return lines are elided here.)
		"""
		for x in ("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri", "--nodeps"):
			if x in self._frozen_config.myopts:
	def _merge_order_bias(self, mygraph):
		"""
		For optimal leaf node selection, promote deep system runtime deps and
		order nodes from highest to lowest overall reference count.
		"""

		# Map each node to its overall reference (parent) count.
		# NOTE(review): the node_info = {} initializer is elided here.
		for node in mygraph.order:
			node_info[node] = len(mygraph.parent_nodes(node))
		deep_system_deps = _find_deep_system_runtime_deps(mygraph)

		def cmp_merge_preference(node1, node2):
			"""
			cmp()-style comparator used with cmp_sort_key() below.
			Uninstall operations get special ordering relative to
			merges, deep system runtime deps are biased by membership
			in deep_system_deps, and ties fall through to reference
			count (higher count sorts earlier).
			"""

			# Special-case uninstall operations (return values elided).
			if node1.operation == 'uninstall':
				if node2.operation == 'uninstall':

			if node2.operation == 'uninstall':
				if node1.operation == 'uninstall':

			# Bias deep system runtime deps (return values elided).
			node1_sys = node1 in deep_system_deps
			node2_sys = node2 in deep_system_deps
			if node1_sys != node2_sys:

			# Fall back to overall reference count, descending.
			return node_info[node2] - node_info[node1]

		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
	def altlist(self, reversed=False):
		"""
		Return a copy of the serialized merge list, computing and caching
		it on first use by resolving conflicts and serializing tasks
		(repeating while _serialize_tasks_retry is raised).

		NOTE: the ``reversed`` parameter name shadows the builtin of the
		same name; it is kept for API compatibility.  The reverse
		handling and return statement are elided in this chunk.
		"""

		while self._dynamic_config._serialized_tasks_cache is None:
			self._resolve_conflicts()
				self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
					self._serialize_tasks()
			except self._serialize_tasks_retry:

		# Return a copy so callers can't mutate the cache.
		retlist = self._dynamic_config._serialized_tasks_cache[:]
	def schedulerGraph(self):
		"""
		The scheduler graph is identical to the normal one except that
		uninstall edges are reversed in specific cases that require
		conflicting packages to be temporarily installed simultaneously.
		This is intended for use by the Scheduler in it's parallelization
		logic. It ensures that temporary simultaneous installation of
		conflicting packages is avoided when appropriate (especially for
		!!atom blockers), but allowed in specific cases that require it.

		Note that this method calls break_refs() which alters the state of
		internal Package instances such that this depgraph instance should
		not be used to perform any more calculations.
		"""

		# NOTE: altlist initializes self._dynamic_config._scheduler_graph
		mergelist = self.altlist()
		self.break_refs(mergelist)
		self.break_refs(self._dynamic_config._scheduler_graph.order)

		# Break DepPriority.satisfied attributes which reference
		# installed Package instances.
		for parents, children, node in \
			self._dynamic_config._scheduler_graph.nodes.values():
			for priorities in chain(parents.values(), children.values()):
				for priority in priorities:
					if priority.satisfied:
						# Replace the (truthy) Package reference with a
						# plain True so the priority stays "satisfied"
						# without keeping the Package instance alive.
						priority.satisfied = True

		pkg_cache = self._frozen_config._pkg_cache
		graph = self._dynamic_config._scheduler_graph
		trees = self._frozen_config.trees
		pruned_pkg_cache = {}
		# Keep only packages still relevant to the scheduler: those in
		# the graph, or installed ones still present in the vartree.
		for pkg in pkg_cache:
			if pkg in graph or \
				(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
				pruned_pkg_cache[pkg] = pkg

			# (loop over roots elided here)
			trees[root]['vartree']._pkg_cache = pruned_pkg_cache
			self.break_refs(trees[root]['vartree'].dbapi)

		self.break_refs(pruned_pkg_cache)

		_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
	def break_refs(self, nodes):
		"""
		Take a mergelist like that returned from self.altlist() and
		break any references that lead back to the depgraph. This is
		useful if you want to hold references to packages without
		also holding the depgraph on the heap.
		"""
			# (iteration over nodes elided here)
			if hasattr(node, "root_config"):
				# The FakeVartree references the _package_cache which
				# references the depgraph. So that Package instances don't
				# hold the depgraph and FakeVartree on the heap, replace
				# the RootConfig that references the FakeVartree with the
				# original RootConfig instance which references the actual
				# vartree.
				node.root_config = \
					self._frozen_config._trees_orig[node.root_config.root]["root_config"]
4054 def _resolve_conflicts(self):
4055 if not self._complete_graph():
4056 raise self._unknown_internal_error()
4058 if not self._validate_blockers():
4059 self._dynamic_config._skip_restart = True
4060 raise self._unknown_internal_error()
4062 if self._dynamic_config._slot_collision_info:
4063 self._process_slot_conflicts()
	def _serialize_tasks(self):
		"""
		Flatten the dependency digraph into an ordered merge list
		(retlist) plus a scheduler graph, iteratively selecting leaf
		nodes and scheduling Uninstall tasks as needed to resolve
		blockers.  Raises _serialize_tasks_retry when the calculation
		must be redone in "complete" mode, and _unknown_internal_error
		for unresolvable circular dependencies or blocker conflicts
		that are not acceptable.  Many control-flow lines (returns,
		breaks, continues, initializers) are elided in this chunk.
		"""
		if "--debug" in self._frozen_config.myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self._dynamic_config.digraph.debug_print()
			writemsg("\n", noiselevel=-1)

		scheduler_graph = self._dynamic_config.digraph.copy()

		if '--nodeps' in self._frozen_config.myopts:
			# Preserve the package order given on the command line.
			return ([node for node in scheduler_graph \
				if isinstance(node, Package) \
				and node.operation == 'merge'], scheduler_graph)

		mygraph=self._dynamic_config.digraph.copy()

		removed_nodes = set()

		# Prune off all DependencyArg instances since they aren't
		# needed, and because of nested sets this is faster than doing
		# it with multiple digraph.root_nodes() calls below. This also
		# takes care of nested sets that have circular references,
		# which wouldn't be matched by digraph.root_nodes().
		for node in mygraph:
			if isinstance(node, DependencyArg):
				removed_nodes.add(node)
		mygraph.difference_update(removed_nodes)
		removed_nodes.clear()

		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
		for node in mygraph.root_nodes():
			if not isinstance(node, Package) or \
				node.installed or node.onlydeps:
				removed_nodes.add(node)
		self._spinner_update()
		mygraph.difference_update(removed_nodes)
		if not removed_nodes:
			removed_nodes.clear()

		self._merge_order_bias(mygraph)

		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:

		myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()

		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		# resolved.
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		complete = "complete" in self._dynamic_config.myparams

		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]

		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._frozen_config._running_root.root
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)

		# match_pkgs() returns a list; reduce each result to a single
		# Package instance or None (branch openers elided here).
			running_portage = running_portage[0]
			running_portage = None

		if replacement_portage:
			replacement_portage = replacement_portage[0]
			replacement_portage = None

		if replacement_portage == running_portage:
			replacement_portage = None

		if replacement_portage is not None and \
			(running_portage is None or \
			running_portage.cpv != replacement_portage.cpv or \
			'9999' in replacement_portage.cpv or \
			'git' in replacement_portage.inherited):
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)

		if running_portage is not None:
			# Collect portage's own runtime deps so that they are
			# protected from inappropriate uninstallation below.
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=self._pkg_use_enabled(running_portage),
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString as e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				portage_rdepend = {running_portage : []}
			for atoms in portage_rdepend.values():
				runtime_deps.update(atom for atom in atoms \
					if not atom.blocker)

		# Merge libc asap, in order to account for implicit
		# dependencies. See bug #303567.
		for root in (running_root,):
			libc_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
				portage.const.LIBC_PACKAGE_ATOM)
				libc_pkg = libc_pkg[0]
				if libc_pkg.operation == 'merge':
					# Only add a dep when the version changes.
					if not libc_pkg.root_config.trees[
						'vartree'].dbapi.cpv_exists(libc_pkg.cpv):

						# If there's also an os-headers upgrade, we need to
						# pull that in first. See bug #328317.
						os_headers_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
							portage.const.OS_HEADERS_PACKAGE_ATOM)
							os_headers_pkg = os_headers_pkg[0]
							if os_headers_pkg.operation == 'merge':
								# Only add a dep when the version changes.
								if not os_headers_pkg.root_config.trees[
									'vartree'].dbapi.cpv_exists(os_headers_pkg.cpv):
									asap_nodes.append(os_headers_pkg)

						asap_nodes.append(libc_pkg)

		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
			if node not in mergeable_nodes:
			if node == replacement_portage and \
				mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):

		def ignore_uninst_or_med(priority):
			# Like priority_range.ignore_medium, but also ignores
			# blocker-uninstall edges.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium(priority)

		def ignore_uninst_or_med_soft(priority):
			# Like priority_range.ignore_medium_soft, but also ignores
			# blocker-uninstall edges.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium_soft(priority)

		tree_mode = "--tree" in self._frozen_config.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available. This is set to False when the previous iteration
		# failed to select any nodes. It is reset whenever nodes are
		# successfully selected.

		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False

		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		#
		# iteration   prefer_asap   drop_satisfied

		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.

		while not mygraph.empty():
			self._spinner_update()
			selected_nodes = None
			ignore_priority = None
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=priority_range.ignore_soft):
						selected_nodes = [node]
						asap_nodes.remove(node)

			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				for i in range(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)

					# If there is a mixture of merges and uninstalls,
					# do the uninstalls first.
					good_uninstalls = []
						if node.operation == "uninstall":
							good_uninstalls.append(node)

						nodes = good_uninstalls

					if ignore_priority is None and not tree_mode:
						# Greedily pop all of these nodes since no
						# relationship has been ignored. This optimization
						# destroys --tree output, so it's disabled in tree
						# mode.
						selected_nodes = nodes
						# For optimal merge order:
						#  * Only pop one node.
						#  * Removing a root node (node without a parent)
						#    will not produce a leaf node, so avoid it.
						#  * It's normal for a selected uninstall to be a
						#    root node, so don't check them for parents.
						if node.operation == "uninstall" or \
							mygraph.parent_nodes(node):
							selected_nodes = [node]

			if not selected_nodes:
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
						for i in range(priority_range.SOFT,
							priority_range.MEDIUM_SOFT + 1):
							ignore_priority = priority_range.ignore_priority[i]
								if not mygraph.parent_nodes(node):
								selected_nodes = set()
								if gather_deps(ignore_priority,
									mergeable_nodes, selected_nodes, node):
									selected_nodes = None

			if prefer_asap and asap_nodes and not selected_nodes:
				# We failed to find any asap nodes to merge, so ignore
				# them for the next iteration.

			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
						DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
						if child in asap_nodes:
						asap_nodes.append(child)

			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.

					priority_range = DepPrioritySatisfiedRange
					priority_range = DepPriorityNormalRange

				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)

				min_parent_deps = None
				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).

					if task in ignored_uninstall_tasks:

					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.

					root_config = self._frozen_config.roots[task.root]
					inst_pkg = self._pkg(task.cpv, "installed", root_config,

					if self._dynamic_config.digraph.contains(inst_pkg):

					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						if not eapi_has_strong_blocks(blocker.eapi):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
					if forbid_overlap and running_root == task.root:

					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:
							if other_version is None:

						# For packages in the system set, don't take
						# any chances. If the conflict can't be resolved
						# by a normal replacement operation then abort.
							for atom in root_config.sets[
								"system"].iterAtomsForPackage(task):
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.

						# For packages in the world set, go ahead an uninstall
						# when necessary, as long as the atom will be satisfied
						# in the final state.
						graph_db = self._dynamic_config.mydbapi[task.root]
							for atom in root_config.sets[
								"selected"].iterAtomsForPackage(task):
								for pkg in graph_db.match_pkgs(atom):
									self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					self._spinner_update()
					mergeable_parent = False
					parent_deps.add(task)
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if min_parent_deps is not None and \
							len(parent_deps) >= min_parent_deps:
							# This task is no better than a previously selected
							# task, so abort search now in order to avoid wasting
							# any more cpu time on this task. This increases
							# performance dramatically in cases when there are
							# hundreds of blockers to solve, like when
							# upgrading to a new slot of kde-meta.
							mergeable_parent = None
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
							mergeable_nodes, set(), parent):
							mergeable_parent = True

					if not mergeable_parent:

					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)

					if uninst_task is not None and min_parent_deps == 1:
						# This is the best possible result, so so abort search
						# now in order to avoid wasting any more cpu time.

				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)

					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)

					# Sometimes a merge node will render an uninstall
					# node unnecessary (due to occupying the same SLOT),
					# and we want to avoid executing a separate uninstall
					# task in that case.
					slot_node = self._dynamic_config.mydbapi[uninst_task.root
						].match_pkgs(uninst_task.slot_atom)
						slot_node[0].operation == "merge":
						mygraph.add(slot_node[0], uninst_task,
							priority=BlockerDepPriority.instance)

					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()

			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				for node in myblocker_uninstalls.leaf_nodes():
						mygraph.remove(node)
						ignored_uninstall_tasks.add(node)

				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				self._dynamic_config._circular_deps_for_display = mygraph
				self._dynamic_config._skip_restart = True
				raise self._unknown_internal_error()

			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			drop_satisfied = False

			mygraph.difference_update(selected_nodes)

			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":

				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True
					vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
					inst_pkg = vardb.match_pkgs(node.slot_atom)
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						inst_pkg = inst_pkg[0]
						uninst_task = Package(built=inst_pkg.built,
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
							metadata=inst_pkg.metadata,
							operation="uninstall",
							root_config=inst_pkg.root_config,
							type_name=inst_pkg.type_name)
							mygraph.remove(uninst_task)

				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
								self._dynamic_config._unsolvable_blockers:
								solved_blockers.add(blocker)

				retlist.append(node)

				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(blocker)

		unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)

		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not unsolvable_blockers:
			self._dynamic_config.myparams["complete"] = True
			if '--debug' in self._frozen_config.myopts:
				msg.append("enabling 'complete' depgraph mode " + \
					"due to uninstall task(s):")
				for node in retlist:
					if isinstance(node, Package) and \
						node.operation == 'uninstall':
						msg.append("\t%s" % (node,))
				writemsg_level("\n%s\n" % \
					"".join("%s\n" % line for line in msg),
					level=logging.DEBUG, noiselevel=-1)
			raise self._serialize_tasks_retry("")

		# Set satisfied state on blockers, but not before the
		# above retry path, since we don't want to modify the
		# state in that case.
		for node in retlist:
			if isinstance(node, Blocker):
				node.satisfied = True

		for blocker in unsolvable_blockers:
			retlist.append(blocker)

		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			self._dynamic_config._skip_restart = True
			raise self._unknown_internal_error()

		if self._dynamic_config._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		return retlist, scheduler_graph
	def _show_circular_deps(self, mygraph):
		"""
		Print a detailed report for circular dependencies in ``mygraph``:
		the merge list, the cycle itself, and (when the handler found
		any) USE flag changes that could break the cycle.
		"""
		self._dynamic_config._circular_dependency_handler = \
			circular_dependency_handler(self, mygraph)
		handler = self._dynamic_config._circular_dependency_handler

		# Force maximally verbose output so the cycle is visible.
		self._frozen_config.myopts.pop("--quiet", None)
		self._frozen_config.myopts["--verbose"] = True
		self._frozen_config.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(handler.merge_list)
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
		portage.writemsg("\n", noiselevel=-1)

		if handler.circular_dep_message is None or \
			"--debug" in self._frozen_config.myopts:
			handler.debug_print()
			portage.writemsg("\n", noiselevel=-1)

		if handler.circular_dep_message is not None:
			portage.writemsg(handler.circular_dep_message, noiselevel=-1)

		suggestions = handler.suggestions
			# (branch on whether any suggestions exist is elided here)
			writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
			if len(suggestions) == 1:
				writemsg("by applying the following change:\n", noiselevel=-1)
				writemsg("by applying " + colorize("bold", "any of") + \
					" the following changes:\n", noiselevel=-1)
			writemsg("".join(suggestions), noiselevel=-1)
			writemsg("\nNote that this change can be reverted, once the package has" + \
				" been installed.\n", noiselevel=-1)
			if handler.large_cycle_count:
				writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
					"Several changes might be required to resolve all cycles.\n" + \
					"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)

			writemsg("\n\n", noiselevel=-1)
			writemsg(prefix + "Note that circular dependencies " + \
				"can often be avoided by temporarily\n", noiselevel=-1)
			writemsg(prefix + "disabling USE flags that trigger " + \
				"optional dependencies.\n", noiselevel=-1)
4808 def _show_merge_list(self):
4809 if self._dynamic_config._serialized_tasks_cache is not None and \
4810 not (self._dynamic_config._displayed_list and \
4811 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
4812 self._dynamic_config._displayed_list == \
4813 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
4814 display_list = self._dynamic_config._serialized_tasks_cache[:]
4815 if "--tree" in self._frozen_config.myopts:
4816 display_list.reverse()
4817 self.display(display_list)
	def _show_unsatisfied_blockers(self, blockers):
		"""
		Report blocker conflicts that could not be resolved: show the
		merge list, then each conflicting package together with the
		parents that pulled it in (pruned to keep output readable).
		"""
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		for line in textwrap.wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Display the conflicting packages along with the packages
		# that pulled them in. This is helpful for troubleshooting
		# cases in which blockers don't solve automatically and
		# the reasons are not apparent from the normal merge list
		for blocker in blockers:
			for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
				self._dynamic_config._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to a synthetic @selected parent when the
					# package is only pulled in by the world set.
					atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
					if atom is not None:
						parent_atoms = set([("@selected", atom)])
				conflict_pkgs[pkg] = parent_atoms

		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		for pkg, parent_atoms in conflict_pkgs.items():
			relevant_parent = False
			for parent, atom in parent_atoms:
				if parent not in conflict_pkgs:
					relevant_parent = True
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

		# Max number of parents shown, to avoid flooding the display.
		for pkg, parent_atoms in conflict_pkgs.items():

			# Prefer packages that are not directly involved in a conflict.
			for parent_atom in parent_atoms:
				if len(pruned_list) >= max_parents:
				parent, atom = parent_atom
				if parent not in conflict_pkgs:
					pruned_list.add(parent_atom)

			# Fill any remaining slots with whatever parents are left.
			for parent_atom in parent_atoms:
				if len(pruned_list) >= max_parents:
				pruned_list.add(parent_atom)

			omitted_parents = len(parent_atoms) - len(pruned_list)
			msg.append(indent + "%s pulled in by\n" % pkg)

			for parent_atom in pruned_list:
				parent, atom = parent_atom
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					# Package types.
					msg.append("%s required by %s" % (atom, parent))

				msg.append(2*indent)
				msg.append("(and %d more)\n" % omitted_parents)

		sys.stderr.write("".join(msg))

		if "--quiet" not in self._frozen_config.myopts:
			show_blocker_docs_link()
# Thin wrapper around the module-level display() that remembers what was
# shown, so display_problems() can avoid re-printing the same list.
# NOTE(review): favorites=[] is a shared mutable default; it appears to be
# passed through unmodified here, but confirm the callee never mutates it.
4914 def display(self, mylist, favorites=[], verbosity=None):
4916 # This is used to prevent display_problems() from
4917 # redundantly displaying this exact same merge list
4918 # again via _show_merge_list().
4919 self._dynamic_config._displayed_list = mylist
4921 return display(self, mylist, favorites, verbosity)
4923 def display_problems(self):
4925 Display problems with the dependency graph such as slot collisions.
4926 This is called internally by display() to show the problems _after_
4927 the merge list where it is most likely to be seen, but if display()
4928 is not going to be called then this method should be called explicitly
4929 to ensure that the user is notified of problems with the graph.
4931 All output goes to stderr, except for unsatisfied dependencies which
4932 go to stdout for parsing by programs such as autounmask.
4935 # Note that show_masked_packages() sends its output to
4936 # stdout, and some programs such as autounmask parse the
4937 # output in cases when emerge bails out. However, when
4938 # show_masked_packages() is called for installed packages
4939 # here, the message is a warning that is more appropriate
4940 # to send to stderr, so temporarily redirect stdout to
4941 # stderr. TODO: Fix output code so there's a cleaner way
4942 # to redirect everything to stderr.
4947 sys.stdout = sys.stderr
4948 self._display_problems()
4954 # This goes to stdout for parsing by programs like autounmask.
4955 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
4956 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits, in order, circular-dep
# notices, blocker/slot-conflict notices, missed updates, suggested
# keyword/USE/license config changes, world-file problems,
# package.provided warnings, and masked-package reports.
4958 def _display_problems(self):
4959 if self._dynamic_config._circular_deps_for_display is not None:
4960 self._show_circular_deps(
4961 self._dynamic_config._circular_deps_for_display)
4963 # The user is only notified of a slot conflict if
4964 # there are no unresolvable blocker conflicts.
4965 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
4966 self._show_unsatisfied_blockers(
4967 self._dynamic_config._unsatisfied_blockers_for_display)
4968 elif self._dynamic_config._slot_collision_info:
4969 self._show_slot_collision_notice()
4971 self._show_missed_update()
# Collect "=cat/pkg-ver ~arch"-style lines for packages that need
# unstable-keyword acceptance.
4973 unstable_keyword_msg = []
4974 for pkg in self._dynamic_config._needed_unstable_keywords:
4975 self._show_merge_list()
4976 if pkg in self._dynamic_config.digraph:
4977 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4978 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
4979 use=self._pkg_use_enabled(pkg))
4980 for reason in mreasons:
4981 if reason.unmask_hint and \
4982 reason.unmask_hint.key == 'unstable keyword':
4983 keyword = reason.unmask_hint.value
4985 unstable_keyword_msg.append(self._get_dep_chain_as_comment(pkg))
4986 unstable_keyword_msg.append("=%s %s\n" % (pkg.cpv, keyword))
# Collect suggested package.use changes ("flag" to enable,
# "-flag" to disable).
4988 use_changes_msg = []
4989 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
4990 self._show_merge_list()
4991 if pkg in self._dynamic_config.digraph:
4992 changes = needed_use_config_change[1]
4994 for flag, state in changes.items():
4996 adjustments.append(flag)
4998 adjustments.append("-" + flag)
4999 use_changes_msg.append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
5000 use_changes_msg.append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
# Collect suggested package.license additions.
5003 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
5004 self._show_merge_list()
5005 if pkg in self._dynamic_config.digraph:
5006 license_msg.append(self._get_dep_chain_as_comment(pkg))
5007 license_msg.append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
5009 if unstable_keyword_msg:
5010 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
5011 " are necessary to proceed:\n", noiselevel=-1)
5012 writemsg_stdout("".join(unstable_keyword_msg), noiselevel=-1)
5015 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
5016 " are necessary to proceed:\n", noiselevel=-1)
5017 writemsg_stdout("".join(use_changes_msg), noiselevel=-1)
5020 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
5021 " are necessary to proceed:\n", noiselevel=-1)
5022 writemsg_stdout("".join(license_msg), noiselevel=-1)
5024 # TODO: Add generic support for "set problem" handlers so that
5025 # the below warnings aren't special cases for world only.
5027 if self._dynamic_config._missing_args:
5028 world_problems = False
5029 if "world" in self._dynamic_config.sets[
5030 self._frozen_config.target_root].sets:
5031 # Filter out indirect members of world (from nested sets)
5032 # since only direct members of world are desired here.
5033 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
5034 for arg, atom in self._dynamic_config._missing_args:
5035 if arg.name in ("selected", "world") and atom in world_set:
5036 world_problems = True
5040 sys.stderr.write("\n!!! Problems have been " + \
5041 "detected with your world file\n")
5042 sys.stderr.write("!!! Please run " + \
5043 green("emaint --check world")+"\n\n")
5045 if self._dynamic_config._missing_args:
5046 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5047 " Ebuilds for the following packages are either all\n")
5048 sys.stderr.write(colorize("BAD", "!!!") + \
5049 " masked or don't exist:\n")
5050 sys.stderr.write(" ".join(str(atom) for arg, atom in \
5051 self._dynamic_config._missing_args) + "\n")
5053 if self._dynamic_config._pprovided_args:
# Group the package.provided hits by (arg, atom) and remember
# which set(s) referenced each one.
5055 for arg, atom in self._dynamic_config._pprovided_args:
5056 if isinstance(arg, SetArg):
5058 arg_atom = (atom, atom)
5061 arg_atom = (arg.arg, atom)
5062 refs = arg_refs.setdefault(arg_atom, [])
5063 if parent not in refs:
5066 msg.append(bad("\nWARNING: "))
5067 if len(self._dynamic_config._pprovided_args) > 1:
5068 msg.append("Requested packages will not be " + \
5069 "merged because they are listed in\n")
5071 msg.append("A requested package will not be " + \
5072 "merged because it is listed in\n")
5073 msg.append("package.provided:\n\n")
5074 problems_sets = set()
5075 for (arg, atom), refs in arg_refs.items():
5078 problems_sets.update(refs)
5080 ref_string = ", ".join(["'%s'" % name for name in refs])
5081 ref_string = " pulled in by " + ref_string
5082 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
5084 if "selected" in problems_sets or "world" in problems_sets:
5085 msg.append("This problem can be solved in one of the following ways:\n\n")
5086 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
5087 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
5088 msg.append(" C) Remove offending entries from package.provided.\n\n")
5089 msg.append("The best course of action depends on the reason that an offending\n")
5090 msg.append("package.provided entry exists.\n\n")
5091 sys.stderr.write("".join(msg))
5093 masked_packages = []
5094 for pkg in self._dynamic_config._masked_license_updates:
5095 root_config = pkg.root_config
5096 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5097 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
5098 masked_packages.append((root_config, pkgsettings,
5099 pkg.cpv, pkg.metadata, mreasons))
5101 writemsg("\n" + colorize("BAD", "!!!") + \
5102 " The following updates are masked by LICENSE changes:\n",
5104 show_masked_packages(masked_packages)
5106 writemsg("\n", noiselevel=-1)
5108 masked_packages = []
5109 for pkg in self._dynamic_config._masked_installed:
5110 root_config = pkg.root_config
5111 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# NOTE(review): this passes the bound method itself as use=, while the
# analogous loop above passes self._pkg_use_enabled(pkg) (the result).
# Looks like a bug — confirm against get_masking_status()'s contract.
5112 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
5113 masked_packages.append((root_config, pkgsettings,
5114 pkg.cpv, pkg.metadata, mreasons))
5116 writemsg("\n" + colorize("BAD", "!!!") + \
5117 " The following installed packages are masked:\n",
5119 show_masked_packages(masked_packages)
5121 writemsg("\n", noiselevel=-1)
5123 def saveNomergeFavorites(self):
5124 """Find atoms in favorites that are not in the mergelist and add them
5125 to the world file if necessary."""
# Nothing should be recorded for pretend/fetch-only style invocations.
5126 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
5127 "--oneshot", "--onlydeps", "--pretend"):
5128 if x in self._frozen_config.myopts:
5130 root_config = self._frozen_config.roots[self._frozen_config.target_root]
5131 world_set = root_config.sets["selected"]
# Locking/loading are optional capabilities of the set implementation.
5133 world_locked = False
5134 if hasattr(world_set, "lock"):
5138 if hasattr(world_set, "load"):
5139 world_set.load() # maybe it's changed on disk
5141 args_set = self._dynamic_config.sets[
5142 self._frozen_config.target_root].sets['__non_set_args__']
5143 portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
5144 added_favorites = set()
# Collect world-file atoms for "nomerge" nodes on the target root.
5145 for x in self._dynamic_config._set_nodes:
5146 if x.operation != "nomerge":
5149 if x.root != root_config.root:
5153 myfavkey = create_world_atom(x, args_set, root_config)
5155 if myfavkey in added_favorites:
5157 added_favorites.add(myfavkey)
5158 except portage.exception.InvalidDependString as e:
5159 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
5160 (x.cpv, e), noiselevel=-1)
5161 writemsg("!!! see '%s'\n\n" % os.path.join(
5162 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
# Also record requested sets (as @set entries) that are world
# candidates, skipping the world/selected sets themselves.
5165 for arg in self._dynamic_config._initial_arg_list:
5166 if not isinstance(arg, SetArg):
5168 if arg.root_config.root != root_config.root:
5171 if k in ("selected", "world") or \
5172 not root_config.sets[k].world_candidate:
5177 all_added.append(SETPREFIX + k)
5178 all_added.extend(added_favorites)
5181 writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
5182 colorize("INFORM", str(a)), noiselevel=-1)
5184 world_set.update(all_added)
5189 def _loadResumeCommand(self, resume_data, skip_masked=True,
5192 Add a resume command to the graph and validate it in the process. This
5193 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before using it.
5198 if not isinstance(resume_data, dict):
5201 mergelist = resume_data.get("mergelist")
5202 if not isinstance(mergelist, list):
5205 fakedb = self._dynamic_config.mydbapi
5206 trees = self._frozen_config.trees
5207 serialized_tasks = []
# Each mergelist entry is expected to be [pkg_type, root, cpv, action].
5210 if not (isinstance(x, list) and len(x) == 4):
5212 pkg_type, myroot, pkg_key, action = x
5213 if pkg_type not in self.pkg_tree_map:
5215 if action != "merge":
5217 root_config = self._frozen_config.roots[myroot]
5219 pkg = self._pkg(pkg_key, pkg_type, root_config)
5220 except portage.exception.PackageNotFound:
5221 # It does not exist or it is corrupt.
5223 # TODO: log these somewhere
# Honor --exclude for packages scheduled to merge.
5227 if "merge" == pkg.operation and \
5228 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
5229 modified_use=self._pkg_use_enabled(pkg)):
5232 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
5234 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
5236 self._dynamic_config._unsatisfied_deps_for_display.append(
5237 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
5239 fakedb[myroot].cpv_inject(pkg)
5240 serialized_tasks.append(pkg)
5241 self._spinner_update()
5243 if self._dynamic_config._unsatisfied_deps_for_display:
5246 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
5247 self._dynamic_config._serialized_tasks_cache = serialized_tasks
5248 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
5250 self._select_package = self._select_pkg_from_graph
5251 self._dynamic_config.myparams["selective"] = True
5252 # Always traverse deep dependencies in order to account for
5253 # potentially unsatisfied dependencies of installed packages.
5254 # This is necessary for correct --keep-going or --resume operation
5255 # in case a package from a group of circularly dependent packages
5256 # fails. In this case, a package which has recently been installed
5257 # may have an unsatisfied circular dependency (pulled in by
5258 # PDEPEND, for example). So, even though a package is already
5259 # installed, it may not have all of its dependencies satisfied, so
5260 # it may not be usable. If such a package is in the subgraph of
5261 # deep dependencies of a scheduled build, that build needs to
5262 # be cancelled. In order for this type of situation to be
5263 # recognized, deep traversal of dependencies is required.
5264 self._dynamic_config.myparams["deep"] = True
5266 favorites = resume_data.get("favorites")
5267 args_set = self._dynamic_config.sets[
5268 self._frozen_config.target_root].sets['__non_set_args__']
5269 if isinstance(favorites, list):
5270 args = self._load_favorites(favorites)
5274 for task in serialized_tasks:
5275 if isinstance(task, Package) and \
5276 task.operation == "merge":
5277 if not self._add_pkg(task, None):
5280 # Packages for argument atoms need to be explicitly
5281 # added via _add_pkg() so that they are included in the
5282 # digraph (needed at least for --tree display).
5283 for arg in self._expand_set_args(args, add_to_digraph=True):
5284 for atom in arg.pset.getAtoms():
5285 pkg, existing_node = self._select_package(
5286 arg.root_config.root, atom)
5287 if existing_node is None and \
5289 if not self._add_pkg(pkg, Dependency(atom=atom,
5290 root=pkg.root, parent=arg)):
5293 # Allow unsatisfied deps here to avoid showing a masking
5294 # message for an unsatisfied dep that isn't necessarily
5296 if not self._create_graph(allow_unsatisfied=True):
# Unsatisfied deps of to-be-merged packages always count.
5299 unsatisfied_deps = []
5300 for dep in self._dynamic_config._unsatisfied_deps:
5301 if not isinstance(dep.parent, Package):
5303 if dep.parent.operation == "merge":
5304 unsatisfied_deps.append(dep)
5307 # For unsatisfied deps of installed packages, only account for
5308 # them if they are in the subgraph of dependencies of a package
5309 # which is scheduled to be installed.
5310 unsatisfied_install = False
5312 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
5314 node = dep_stack.pop()
5315 if not isinstance(node, Package):
5317 if node.operation == "merge":
5318 unsatisfied_install = True
5320 if node in traversed:
5323 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
5325 if unsatisfied_install:
5326 unsatisfied_deps.append(dep)
5328 if masked_tasks or unsatisfied_deps:
5329 # This probably means that a required package
5330 # was dropped via --skipfirst. It makes the
5331 # resume list invalid, so convert it to a
5332 # UnsatisfiedResumeDep exception.
5333 raise self.UnsatisfiedResumeDep(self,
5334 masked_tasks + unsatisfied_deps)
5335 self._dynamic_config._serialized_tasks_cache = None
5338 except self._unknown_internal_error:
5343 def _load_favorites(self, favorites):
5345 Use a list of favorites to resume state from a
5346 previous select_files() call. This creates similar
5347 DependencyArg instances to those that would have
5348 been created by the original select_files() call.
5349 This allows Package instances to be matched with
5350 DependencyArg instances during graph creation.
5352 root_config = self._frozen_config.roots[self._frozen_config.target_root]
5353 sets = root_config.sets
5354 depgraph_sets = self._dynamic_config.sets[root_config.root]
# basestring: this module still supports Python 2.
5357 if not isinstance(x, basestring):
5359 if x in ("system", "world"):
# "@set" favorites become SetArg instances; anything else is
# treated as an atom (AtomArg), falling through on InvalidAtom.
5361 if x.startswith(SETPREFIX):
5362 s = x[len(SETPREFIX):]
5365 if s in depgraph_sets.sets:
5368 depgraph_sets.sets[s] = pset
5369 args.append(SetArg(arg=x, pset=pset,
5370 root_config=root_config))
5374 except portage.exception.InvalidAtom:
5376 args.append(AtomArg(arg=x, atom=x,
5377 root_config=root_config))
5379 self._set_args(args)
5382 class UnsatisfiedResumeDep(portage.exception.PortageException):
5384 A dependency of a resume list is not installed. This
5385 can occur when a required package is dropped from the
5386 merge list via --skipfirst.
5388 def __init__(self, depgraph, value):
5389 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the depgraph so callers can inspect it.
5390 self.depgraph = depgraph
# Base class for exceptions used internally by the depgraph.
5392 class _internal_exception(portage.exception.PortageException):
5393 def __init__(self, value=""):
5394 portage.exception.PortageException.__init__(self, value)
5396 class _unknown_internal_error(_internal_exception):
5398 Used by the depgraph internally to terminate graph creation.
5399 The specific reason for the failure should have been dumped
5400 to stderr, unfortunately, the exact reason for the failure
5404 class _serialize_tasks_retry(_internal_exception):
5406 This is raised by the _serialize_tasks() method when it needs to
5407 be called again for some reason. The only case that it's currently
5408 used for is when neglected dependencies need to be added to the
5409 graph in order to avoid making a potentially unsafe decision.
5412 class _backtrack_mask(_internal_exception):
5414 This is raised by _show_unsatisfied_dep() when it's called with
5415 check_backtrack=True and a matching package has been masked by
# True when a backtracking restart is both requested and permitted.
5419 def need_restart(self):
5420 return self._dynamic_config._need_restart and \
5421 not self._dynamic_config._skip_restart
# Return copies of the state needed to construct the next depgraph for a
# backtracking retry; copies are handed out so the retry may mutate them.
5423 def get_backtrack_parameters(self):
5425 "needed_unstable_keywords":
5426 self._dynamic_config._needed_unstable_keywords.copy(), \
5428 self._dynamic_config._runtime_pkg_mask.copy(),
5429 "needed_use_config_changes":
5430 self._dynamic_config._needed_use_config_changes.copy(),
5431 "needed_license_changes":
5432 self._dynamic_config._needed_license_changes.copy(),
5436 class _dep_check_composite_db(dbapi):
5438 A dbapi-like interface that is optimized for use in dep_check() calls.
5439 This is built on top of the existing depgraph package selection logic.
5440 Some packages that have been added to the graph may be masked from this
5441 view in order to influence the atom preference selection that occurs
5444 def __init__(self, depgraph, root):
5445 dbapi.__init__(self)
5446 self._depgraph = depgraph
# Per-atom match results and cpv -> Package lookup, filled by match().
5448 self._match_cache = {}
5449 self._cpv_pkg_map = {}
5451 def _clear_cache(self):
5452 self._match_cache.clear()
5453 self._cpv_pkg_map.clear()
# Return matching cpvs for an atom, consulting the depgraph's package
# selection logic and caching the (sorted) result per atom.
5455 def match(self, atom):
5456 ret = self._match_cache.get(atom)
5461 atom = self._dep_expand(atom)
5462 pkg, existing = self._depgraph._select_package(self._root, atom)
5466 # Return the highest available from select_package() as well as
5467 # any matching slots in the graph db.
5469 slots.add(pkg.metadata["SLOT"])
5470 if pkg.cp.startswith("virtual/"):
5471 # For new-style virtual lookahead that occurs inside
5472 # dep_check(), examine all slots. This is needed
5473 # so that newer slots will not unnecessarily be pulled in
5474 # when a satisfying lower slot is already installed. For
5475 # example, if virtual/jdk-1.4 is satisfied via kaffe then
5476 # there's no need to pull in a newer slot to satisfy a
5477 # virtual/jdk dependency.
5478 for db, pkg_type, built, installed, db_keys in \
5479 self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
5480 for cpv in db.match(atom):
5481 if portage.cpv_getkey(cpv) != pkg.cp:
5483 slots.add(db.aux_get(cpv, ["SLOT"])[0])
5485 if self._visible(pkg):
5486 self._cpv_pkg_map[pkg.cpv] = pkg
5488 slots.remove(pkg.metadata["SLOT"])
# Select a visible package for each remaining slot as well.
5490 slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
5491 pkg, existing = self._depgraph._select_package(
5492 self._root, slot_atom)
5495 if not self._visible(pkg):
5497 self._cpv_pkg_map[pkg.cpv] = pkg
5500 self._cpv_sort_ascending(ret)
5501 self._match_cache[orig_atom] = ret
# Decide whether pkg should be exposed through this composite view.
5504 def _visible(self, pkg):
5505 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
5507 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
5508 except (StopIteration, portage.exception.InvalidDependString):
5512 if pkg.installed and \
5513 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
5514 # Account for packages with masks (like KEYWORDS masks)
5515 # that are usually ignored in visibility checks for
5516 # installed packages, in order to handle cases like
5518 myopts = self._depgraph._frozen_config.myopts
5519 use_ebuild_visibility = myopts.get(
5520 '--use-ebuild-visibility', 'n') != 'n'
5521 avoid_update = "--update" not in myopts and \
5522 "remove" not in self._depgraph._dynamic_config.myparams
5523 usepkgonly = "--usepkgonly" in myopts
5524 if not avoid_update:
5525 if not use_ebuild_visibility and usepkgonly:
# Fall back to checking the visibility of the ebuild
# that corresponds to this installed package.
5529 pkg_eb = self._depgraph._pkg(
5530 pkg.cpv, "ebuild", pkg.root_config)
5531 except portage.exception.PackageNotFound:
5534 if not self._depgraph._pkg_visibility_check(pkg_eb):
5537 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
5538 self._root].get(pkg.slot_atom)
5539 if in_graph is None:
5540 # Mask choices for packages which are not the highest visible
5541 # version within their slot (since they usually trigger slot
5543 highest_visible, in_graph = self._depgraph._select_package(
5544 self._root, pkg.slot_atom)
5545 # Note: highest_visible is not necessarily the real highest
5546 # visible, especially when --update is not enabled, so use
5547 # < operator instead of !=.
5548 if pkg < highest_visible:
5550 elif in_graph != pkg:
5551 # Mask choices for packages that would trigger a slot
5552 # conflict with a previously selected package.
5556 def _dep_expand(self, atom):
5558 This is only needed for old installed packages that may
5559 contain atoms that are not fully qualified with a specific
5560 category. Emulate the cpv_expand() function that's used by
5561 dbapi.match() in cases like this. If there are multiple
5562 matches, it's often due to a new-style virtual that has
5563 been added, so try to filter those out to avoid raising
5566 root_config = self._depgraph.roots[self._root]
5568 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5569 if len(expanded_atoms) > 1:
5570 non_virtual_atoms = []
5571 for x in expanded_atoms:
5572 if not x.cp.startswith("virtual/"):
5573 non_virtual_atoms.append(x)
5574 if len(non_virtual_atoms) == 1:
5575 expanded_atoms = non_virtual_atoms
5576 if len(expanded_atoms) > 1:
5577 # compatible with portage.cpv_expand()
5578 raise portage.exception.AmbiguousPackageName(
5579 [x.cp for x in expanded_atoms])
5581 atom = expanded_atoms[0]
# No match at all: see if the name is a known virtual and, if so,
# rewrite the atom into the virtual/ category.
5583 null_atom = Atom(insert_category_into_atom(atom, "null"))
5584 cat, atom_pn = portage.catsplit(null_atom.cp)
5585 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5587 # Allow the resolver to choose which virtual.
5588 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
# dbapi interface: metadata lookup for cpvs previously returned
# by match(); missing keys yield "".
5593 def aux_get(self, cpv, wants):
5594 metadata = self._cpv_pkg_map[cpv].metadata
5595 return [metadata.get(x, "") for x in wants]
5597 def match_pkgs(self, atom):
5598 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
# Tell the user that a short ebuild name matched several categories.
# In --quiet mode just list the candidates; otherwise run a search so
# descriptions are shown too.
5600 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
5602 if "--quiet" in myopts:
5603 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
5604 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
5605 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5606 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
5609 s = search(root_config, spinner, "--searchdesc" in myopts,
5610 "--quiet" not in myopts, "--usepkg" in myopts,
5611 "--usepkgonly" in myopts)
5612 null_cp = portage.dep_getkey(insert_category_into_atom(
5614 cat, atom_pn = portage.catsplit(null_cp)
5615 s.searchkey = atom_pn
5616 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5619 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
5620 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
# Insert "category/" in front of the package-name part of an atom,
# preserving any leading operator characters (e.g. ">=" in ">=foo-1").
5622 def insert_category_into_atom(atom, category):
# First word character marks where the package name begins.
5623 alphanum = re.search(r'\w', atom)
5625 ret = atom[:alphanum.start()] + "%s/" % category + \
5626 atom[alphanum.start():]
# Print the "These are the packages that would be ..." banner (for
# pretend/ask/tree/verbose modes) and start or silence the spinner.
5631 def _spinner_start(spinner, myopts):
5634 if "--quiet" not in myopts and \
5635 ("--pretend" in myopts or "--ask" in myopts or \
5636 "--tree" in myopts or "--verbose" in myopts):
5638 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
5640 elif "--buildpkgonly" in myopts:
5644 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
5645 if "--unordered-display" in myopts:
5646 portage.writemsg_stdout("\n" + \
5647 darkgreen("These are the packages that " + \
5648 "would be %s:" % action) + "\n\n")
5650 portage.writemsg_stdout("\n" + \
5651 darkgreen("These are the packages that " + \
5652 "would be %s, in reverse order:" % action) + "\n\n")
5654 portage.writemsg_stdout("\n" + \
5655 darkgreen("These are the packages that " + \
5656 "would be %s, in order:" % action) + "\n\n")
5658 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
5659 if not show_spinner:
5660 spinner.update = spinner.update_quiet
5663 portage.writemsg_stdout("Calculating dependencies ")
# Finish the "Calculating dependencies" line, unless the spinner was
# absent or silenced by _spinner_start().
5665 def _spinner_stop(spinner):
5666 if spinner is None or \
5667 spinner.update is spinner.update_quiet:
5670 portage.writemsg_stdout("\b\b... done!\n")
# Public wrapper: run the backtracking dependency calculation with
# spinner start/stop around it.
5672 def backtrack_depgraph(settings, trees, myopts, myparams,
5673 myaction, myfiles, spinner):
5675 Raises PackageSetNotFound if myfiles contains a missing package set.
5677 _spinner_start(spinner, myopts)
5679 return _backtrack_depgraph(settings, trees, myopts, myparams,
5680 myaction, myfiles, spinner)
5682 _spinner_stop(spinner)
# Repeatedly build a depgraph, feeding the parameters gathered from a
# failed attempt back into the next one, until it succeeds or the
# --backtrack budget is exhausted.
5684 def _backtrack_depgraph(settings, trees, myopts, myparams,
5685 myaction, myfiles, spinner):
5687 backtrack_max = myopts.get('--backtrack', 10)
5688 backtrack_parameters = {}
5689 needed_unstable_keywords = None
5690 allow_backtracking = backtrack_max > 0
5692 frozen_config = _frozen_depgraph_config(settings, trees,
5695 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5696 frozen_config=frozen_config,
5697 allow_backtracking=allow_backtracking,
5698 **backtrack_parameters)
5699 success, favorites = mydepgraph.select_files(myfiles)
5701 if mydepgraph.need_restart() and backtracked < backtrack_max:
5702 backtrack_parameters = mydepgraph.get_backtrack_parameters()
5704 elif backtracked and allow_backtracking:
5705 if "--debug" in myopts:
5707 "\n\nbacktracking aborted after %s tries\n\n" % \
5708 backtracked, noiselevel=-1, level=logging.DEBUG)
5709 # Backtracking failed, so disable it and do
5710 # a plain dep calculation + error message.
5711 allow_backtracking = False
5712 #Don't reset needed_unstable_keywords here, since we don't want to
5713 #send the user through a "one step at a time" unmasking session for
5715 backtrack_parameters.pop('runtime_pkg_mask', None)
5720 return (success, mydepgraph, favorites)
# Public wrapper: build a depgraph from a saved resume list with spinner
# start/stop around it.
5722 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5724 Raises PackageSetNotFound if myfiles contains a missing package set.
5726 _spinner_start(spinner, myopts)
5728 return _resume_depgraph(settings, trees, mtimedb, myopts,
5731 _spinner_stop(spinner)
5733 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5735 Construct a depgraph for the given resume list. This will raise
5736 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
5737 TODO: Return reasons for dropped_tasks, for display/logging.
5739 @returns: (success, depgraph, dropped_tasks)
5742 skip_unsatisfied = True
5743 mergelist = mtimedb["resume"]["mergelist"]
5744 dropped_tasks = set()
5745 frozen_config = _frozen_depgraph_config(settings, trees,
5748 mydepgraph = depgraph(settings, trees,
5749 myopts, myparams, spinner, frozen_config=frozen_config)
5751 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
5752 skip_masked=skip_masked)
5753 except depgraph.UnsatisfiedResumeDep as e:
5754 if not skip_unsatisfied:
# Drop unsatisfied parents from the mergelist, transitively:
# removing one package may leave its own parents unsatisfied.
5757 graph = mydepgraph._dynamic_config.digraph
5758 unsatisfied_parents = dict((dep.parent, dep.parent) \
5760 traversed_nodes = set()
5761 unsatisfied_stack = list(unsatisfied_parents)
5762 while unsatisfied_stack:
5763 pkg = unsatisfied_stack.pop()
5764 if pkg in traversed_nodes:
5766 traversed_nodes.add(pkg)
5768 # If this package was pulled in by a parent
5769 # package scheduled for merge, removing this
5770 # package may cause the parent package's
5771 # dependency to become unsatisfied.
5772 for parent_node in graph.parent_nodes(pkg):
5773 if not isinstance(parent_node, Package) \
5774 or parent_node.operation not in ("merge", "nomerge"):
5777 graph.child_nodes(parent_node,
5778 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5779 if pkg in unsatisfied:
5780 unsatisfied_parents[parent_node] = parent_node
5781 unsatisfied_stack.append(parent_node)
5783 pruned_mergelist = []
5785 if isinstance(x, list) and \
5786 tuple(x) not in unsatisfied_parents:
5787 pruned_mergelist.append(x)
5789 # If the mergelist doesn't shrink then this loop is infinite.
5790 if len(pruned_mergelist) == len(mergelist):
5791 # This happens if a package can't be dropped because
5792 # it's already installed, but it has unsatisfied PDEPEND.
5794 mergelist[:] = pruned_mergelist
5796 # Exclude installed packages that have been removed from the graph due
5797 # to failure to build/install runtime dependencies after the dependent
5798 # package has already been installed.
5799 dropped_tasks.update(pkg for pkg in \
5800 unsatisfied_parents if pkg.operation != "nomerge")
5801 mydepgraph.break_refs(unsatisfied_parents)
5803 del e, graph, traversed_nodes, \
5804 unsatisfied_parents, unsatisfied_stack
5808 return (success, mydepgraph, dropped_tasks)
# Fetch a cpv's metadata from db and compute its masking reasons.
# Returns (metadata, mreasons); metadata of None means the entry could
# not be read and is reported as "corruption".
5810 def get_mask_info(root_config, cpv, pkgsettings,
5811 db, pkg_type, built, installed, db_keys, _pkg_use_enabled=None):
5814 metadata = dict(zip(db_keys,
5815 db.aux_get(cpv, db_keys)))
5819 if metadata is None:
5820 mreasons = ["corruption"]
5822 eapi = metadata['EAPI']
5825 if not portage.eapi_is_supported(eapi):
5826 mreasons = ['EAPI %s' % eapi]
5828 pkg = Package(type_name=pkg_type, root_config=root_config,
5829 cpv=cpv, built=built, installed=installed, metadata=metadata)
5832 if _pkg_use_enabled is not None:
5833 modified_use = _pkg_use_enabled(pkg)
5835 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=modified_use)
5836 return metadata, mreasons
# Print each masked package with its masking reasons, plus (once each)
# any relevant package.mask comment and license file location.
# Returns True if any package was masked due to an unsupported EAPI.
5838 def show_masked_packages(masked_packages):
5839 shown_licenses = set()
5840 shown_comments = set()
5841 # Maybe there is both an ebuild and a binary. Only
5842 # show one of them to avoid redundant appearance.
5844 have_eapi_mask = False
5845 for (root_config, pkgsettings, cpv,
5846 metadata, mreasons) in masked_packages:
5847 if cpv in shown_cpvs:
5850 comment, filename = None, None
5851 if "package.mask" in mreasons:
5852 comment, filename = \
5853 portage.getmaskingreason(
5854 cpv, metadata=metadata,
5855 settings=pkgsettings,
5856 portdb=root_config.trees["porttree"].dbapi,
5857 return_location=True)
5858 missing_licenses = []
5860 if not portage.eapi_is_supported(metadata["EAPI"]):
5861 have_eapi_mask = True
5863 missing_licenses = \
5864 pkgsettings._getMissingLicenses(
5866 except portage.exception.InvalidDependString:
5867 # This will have already been reported
5868 # above via mreasons.
5871 writemsg_stdout("- "+cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
5873 if comment and comment not in shown_comments:
5874 writemsg_stdout(filename + ":\n" + comment + "\n",
5876 shown_comments.add(comment)
5877 portdb = root_config.trees["porttree"].dbapi
5878 for l in missing_licenses:
5879 l_path = portdb.findLicensePath(l)
5880 if l in shown_licenses:
5882 msg = ("A copy of the '%s' license" + \
5883 " is located at '%s'.\n\n") % (l, l_path)
5884 writemsg_stdout(msg, noiselevel=-1)
5885 shown_licenses.add(l)
5886 return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation about masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for doc_line in doc_lines:
		writemsg_stdout(doc_line, noiselevel=-1)
def show_blocker_docs_link():
	"""Point the user at the Gentoo Handbook section on blocked packages."""
	messages = (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for message in messages:
		writemsg(message, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, use=None):
	"""Return the human-readable mask reason messages for pkg."""
	reasons = _get_masking_status(pkg, pkgsettings, root_config, use=use)
	return [reason.message for reason in reasons]
5901 def _get_masking_status(pkg, pkgsettings, root_config, use=None):
5903 mreasons = _getmaskingstatus(
5904 pkg, settings=pkgsettings,
5905 portdb=root_config.trees["porttree"].dbapi)
5907 if not pkg.installed:
5908 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5909 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
5910 pkg.metadata["CHOST"]))
5913 for msg_type, msgs in pkg.invalid.items():
5916 _MaskReason("invalid", "invalid: %s" % (msg,)))
5918 if not pkg.metadata["SLOT"]:
5920 _MaskReason("invalid", "SLOT: undefined"))