1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
12 from itertools import chain
15 from portage import os, OrderedDict
16 from portage import _unicode_decode
17 from portage.const import PORTAGE_PACKAGE_ATOM
18 from portage.dbapi import dbapi
19 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use
20 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
21 from portage.exception import InvalidAtom, InvalidDependString
22 from portage.output import colorize, create_color_func, \
24 bad = create_color_func("BAD")
25 from portage.package.ebuild.getmaskingstatus import \
26 _getmaskingstatus, _MaskReason
27 from portage._sets import SETPREFIX
28 from portage._sets.base import InternalPackageSet
29 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
30 from portage.util import writemsg_level
31 from portage.util.digraph import digraph
33 from _emerge.AtomArg import AtomArg
34 from _emerge.Blocker import Blocker
35 from _emerge.BlockerCache import BlockerCache
36 from _emerge.BlockerDepPriority import BlockerDepPriority
37 from _emerge.countdown import countdown
38 from _emerge.create_world_atom import create_world_atom
39 from _emerge.Dependency import Dependency
40 from _emerge.DependencyArg import DependencyArg
41 from _emerge.DepPriority import DepPriority
42 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
43 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
44 from _emerge.FakeVartree import FakeVartree
45 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
46 from _emerge.is_valid_package_atom import is_valid_package_atom
47 from _emerge.Package import Package
48 from _emerge.PackageArg import PackageArg
49 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
50 from _emerge.RootConfig import RootConfig
51 from _emerge.search import search
52 from _emerge.SetArg import SetArg
53 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
54 from _emerge.UnmergeDepPriority import UnmergeDepPriority
55 from _emerge.UseFlagDisplay import pkg_use_display
57 from _emerge.resolver.slot_collision import slot_conflict_handler
58 from _emerge.resolver.circular_dependency import circular_dependency_handler
59 from _emerge.resolver.output import display
61 if sys.hexversion >= 0x3000000:
65 class _scheduler_graph_config(object):
66 def __init__(self, trees, pkg_cache, graph, mergelist):
68 self.pkg_cache = pkg_cache
70 self.mergelist = mergelist
72 class _frozen_depgraph_config(object):
74 def __init__(self, settings, trees, myopts, spinner):
75 self.settings = settings
76 self.target_root = settings["ROOT"]
79 if settings.get("PORTAGE_DEBUG", "") == "1":
81 self.spinner = spinner
82 self._running_root = trees["/"]["root_config"]
83 self._opts_no_restart = frozenset(["--buildpkgonly",
84 "--fetchonly", "--fetch-all-uri", "--pretend"])
87 self._trees_orig = trees
89 # All Package instances
91 self._highest_license_masked = {}
93 self.trees[myroot] = {}
94 # Create a RootConfig instance that references
95 # the FakeVartree instead of the real one.
96 self.roots[myroot] = RootConfig(
97 trees[myroot]["vartree"].settings,
99 trees[myroot]["root_config"].setconfig)
100 for tree in ("porttree", "bintree"):
101 self.trees[myroot][tree] = trees[myroot][tree]
102 self.trees[myroot]["vartree"] = \
103 FakeVartree(trees[myroot]["root_config"],
104 pkg_cache=self._pkg_cache,
105 pkg_root_config=self.roots[myroot])
106 self.pkgsettings[myroot] = portage.config(
107 clone=self.trees[myroot]["vartree"].settings)
109 self._required_set_names = set(["world"])
111 self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
112 for x in ' '.join(myopts.get("--exclude", [])).split():
114 x = Atom(x, allow_wildcard=True)
115 except portage.exception.InvalidAtom:
116 x = Atom("*/" + x, allow_wildcard=True)
117 self.excluded_pkgs.add(x)
class _depgraph_sets(object):
	"""Per-root container for the package sets and atoms that have been
	added to the dependency graph.
	"""
	def __init__(self):
		# NOTE(review): the pasted source dropped the __init__ header and
		# the 'self.sets = {}' initializer; both are required for the
		# assignments below to be valid.
		# contains all sets added to the graph
		self.sets = {}
		# contains non-set atoms given as arguments
		self.sets['__non_set_args__'] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self.atoms = InternalPackageSet()
		# maps each atom back to the argument(s) that pulled it in
		self.atom_arg_map = {}
130 class _dynamic_depgraph_config(object):
132 def __init__(self, depgraph, myparams, allow_backtracking,
133 runtime_pkg_mask, needed_unstable_keywords, needed_use_config_changes, needed_license_changes):
134 self.myparams = myparams.copy()
135 self._vdb_loaded = False
136 self._allow_backtracking = allow_backtracking
137 # Maps slot atom to package for each Package added to the graph.
138 self._slot_pkg_map = {}
139 # Maps nodes to the reasons they were selected for reinstallation.
140 self._reinstall_nodes = {}
142 # Contains a filtered view of preferred packages that are selected
143 # from available repositories.
144 self._filtered_trees = {}
145 # Contains installed packages and new packages that have been added
147 self._graph_trees = {}
148 # Caches visible packages returned from _select_package, for use in
149 # depgraph._iter_atoms_for_pkg() SLOT logic.
150 self._visible_pkgs = {}
151 #contains the args created by select_files
152 self._initial_arg_list = []
153 self.digraph = portage.digraph()
154 # manages sets added to the graph
156 # contains all nodes pulled in by self.sets
157 self._set_nodes = set()
158 # Contains only Blocker -> Uninstall edges
159 self._blocker_uninstalls = digraph()
160 # Contains only Package -> Blocker edges
161 self._blocker_parents = digraph()
162 # Contains only irrelevant Package -> Blocker edges
163 self._irrelevant_blockers = digraph()
164 # Contains only unsolvable Package -> Blocker edges
165 self._unsolvable_blockers = digraph()
166 # Contains all Blocker -> Blocked Package edges
167 self._blocked_pkgs = digraph()
168 # Contains world packages that have been protected from
169 # uninstallation but may not have been added to the graph
170 # if the graph is not complete yet.
171 self._blocked_world_pkgs = {}
172 # Contains packages whose dependencies have been traversed.
173 # This use used to check if we have accounted for blockers
174 # relevant to a package.
175 self._traversed_pkg_deps = set()
176 self._slot_collision_info = {}
177 # Slot collision nodes are not allowed to block other packages since
178 # blocker validation is only able to account for one package per slot.
179 self._slot_collision_nodes = set()
180 self._parent_atoms = {}
181 self._slot_conflict_parent_atoms = set()
182 self._slot_conflict_handler = None
183 self._circular_dependency_handler = None
184 self._serialized_tasks_cache = None
185 self._scheduler_graph = None
186 self._displayed_list = None
187 self._pprovided_args = []
188 self._missing_args = []
189 self._masked_installed = set()
190 self._masked_license_updates = set()
191 self._unsatisfied_deps_for_display = []
192 self._unsatisfied_blockers_for_display = None
193 self._circular_deps_for_display = None
195 self._dep_disjunctive_stack = []
196 self._unsatisfied_deps = []
197 self._initially_unsatisfied_deps = []
198 self._ignored_deps = []
199 self._highest_pkg_cache = {}
201 if runtime_pkg_mask is None:
202 runtime_pkg_mask = {}
204 runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
205 runtime_pkg_mask.items())
207 if needed_unstable_keywords is None:
208 self._needed_unstable_keywords = set()
210 self._needed_unstable_keywords = needed_unstable_keywords.copy()
212 if needed_license_changes is None:
213 self._needed_license_changes = {}
215 self._needed_license_changes = needed_license_changes.copy()
217 if needed_use_config_changes is None:
218 self._needed_use_config_changes = {}
220 self._needed_use_config_changes = \
221 dict((k.copy(), (v[0].copy(), v[1].copy())) for (k, v) in \
222 needed_use_config_changes.items())
224 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
226 self._runtime_pkg_mask = runtime_pkg_mask
227 self._need_restart = False
228 # For conditions that always require user intervention, such as
229 # unsatisfied REQUIRED_USE (currently has no autounmask support).
230 self._skip_restart = False
232 for myroot in depgraph._frozen_config.trees:
233 self.sets[myroot] = _depgraph_sets()
234 self._slot_pkg_map[myroot] = {}
235 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
236 # This dbapi instance will model the state that the vdb will
237 # have after new packages have been installed.
238 fakedb = PackageVirtualDbapi(vardb.settings)
240 self.mydbapi[myroot] = fakedb
243 graph_tree.dbapi = fakedb
244 self._graph_trees[myroot] = {}
245 self._filtered_trees[myroot] = {}
246 # Substitute the graph tree for the vartree in dep_check() since we
247 # want atom selections to be consistent with package selections
248 # have already been made.
249 self._graph_trees[myroot]["porttree"] = graph_tree
250 self._graph_trees[myroot]["vartree"] = graph_tree
253 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
254 self._filtered_trees[myroot]["porttree"] = filtered_tree
255 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
257 # Passing in graph_tree as the vartree here could lead to better
258 # atom selections in some cases by causing atoms for packages that
259 # have been added to the graph to be preferred over other choices.
260 # However, it can trigger atom selections that result in
261 # unresolvable direct circular dependencies. For example, this
262 # happens with gwydion-dylan which depends on either itself or
263 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
264 # gwydion-dylan-bin needs to be selected in order to avoid a
265 # an unresolvable direct circular dependency.
267 # To solve the problem described above, pass in "graph_db" so that
268 # packages that have been added to the graph are distinguishable
269 # from other available packages and installed packages. Also, pass
270 # the parent package into self._select_atoms() calls so that
271 # unresolvable direct circular dependencies can be detected and
272 # avoided when possible.
273 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
274 self._filtered_trees[myroot]["vartree"] = \
275 depgraph._frozen_config.trees[myroot]["vartree"]
278 # (db, pkg_type, built, installed, db_keys)
279 if "remove" in self.myparams:
280 # For removal operations, use _dep_check_composite_db
281 # for availability and visibility checks. This provides
282 # consistency with install operations, so we don't
283 # get install/uninstall cycles like in bug #332719.
284 self._graph_trees[myroot]["porttree"] = filtered_tree
286 if "--usepkgonly" not in depgraph._frozen_config.myopts:
287 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
288 db_keys = list(portdb._aux_cache_keys)
289 dbs.append((portdb, "ebuild", False, False, db_keys))
291 if "--usepkg" in depgraph._frozen_config.myopts:
292 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
293 db_keys = list(bindb._aux_cache_keys)
294 dbs.append((bindb, "binary", True, False, db_keys))
296 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
297 db_keys = list(depgraph._frozen_config._trees_orig[myroot
298 ]["vartree"].dbapi._aux_cache_keys)
299 dbs.append((vardb, "installed", True, True, db_keys))
300 self._filtered_trees[myroot]["dbs"] = dbs
302 class depgraph(object):
304 pkg_tree_map = RootConfig.pkg_tree_map
306 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
308 def __init__(self, settings, trees, myopts, myparams, spinner,
309 frozen_config=None, runtime_pkg_mask=None, needed_unstable_keywords=None, \
310 needed_use_config_changes=None, needed_license_changes=None, allow_backtracking=False):
311 if frozen_config is None:
312 frozen_config = _frozen_depgraph_config(settings, trees,
314 self._frozen_config = frozen_config
315 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
316 allow_backtracking, runtime_pkg_mask, needed_unstable_keywords, \
317 needed_use_config_changes, needed_license_changes)
319 self._select_atoms = self._select_atoms_highest_available
320 self._select_package = self._select_pkg_highest_available
324 Load installed package metadata if appropriate. This used to be called
325 from the constructor, but that wasn't very nice since this procedure
326 is slow and it generates spinner output. So, now it's called on-demand
327 by various methods when necessary.
330 if self._dynamic_config._vdb_loaded:
333 for myroot in self._frozen_config.trees:
335 preload_installed_pkgs = \
336 "--nodeps" not in self._frozen_config.myopts
338 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
339 if not fake_vartree.dbapi:
340 # This needs to be called for the first depgraph, but not for
341 # backtracking depgraphs that share the same frozen_config.
344 # FakeVartree.sync() populates virtuals, and we want
345 # self.pkgsettings to have them populated too.
346 self._frozen_config.pkgsettings[myroot] = \
347 portage.config(clone=fake_vartree.settings)
349 if preload_installed_pkgs:
350 vardb = fake_vartree.dbapi
351 fakedb = self._dynamic_config._graph_trees[
352 myroot]["vartree"].dbapi
355 self._spinner_update()
356 # This triggers metadata updates via FakeVartree.
357 vardb.aux_get(pkg.cpv, [])
358 fakedb.cpv_inject(pkg)
360 self._dynamic_config._vdb_loaded = True
362 def _spinner_update(self):
363 if self._frozen_config.spinner:
364 self._frozen_config.spinner.update()
366 def _show_missed_update(self):
368 # In order to minimize noise, show only the highest
369 # missed update from each SLOT.
371 for pkg, mask_reasons in \
372 self._dynamic_config._runtime_pkg_mask.items():
374 # Exclude installed here since we only
375 # want to show available updates.
377 k = (pkg.root, pkg.slot_atom)
378 if k in missed_updates:
379 other_pkg, mask_type, parent_atoms = missed_updates[k]
382 for mask_type, parent_atoms in mask_reasons.items():
385 missed_updates[k] = (pkg, mask_type, parent_atoms)
388 if not missed_updates:
391 missed_update_types = {}
392 for pkg, mask_type, parent_atoms in missed_updates.values():
393 missed_update_types.setdefault(mask_type,
394 []).append((pkg, parent_atoms))
396 if '--quiet' in self._frozen_config.myopts and \
397 '--debug' not in self._frozen_config.myopts:
398 missed_update_types.pop("slot conflict", None)
399 missed_update_types.pop("missing dependency", None)
401 self._show_missed_update_slot_conflicts(
402 missed_update_types.get("slot conflict"))
404 self._show_missed_update_unsatisfied_dep(
405 missed_update_types.get("missing dependency"))
407 def _show_missed_update_unsatisfied_dep(self, missed_updates):
409 if not missed_updates:
412 backtrack_masked = []
414 for pkg, parent_atoms in missed_updates:
417 for parent, root, atom in parent_atoms:
418 self._show_unsatisfied_dep(root, atom, myparent=parent,
419 check_backtrack=True)
420 except self._backtrack_mask:
421 # This is displayed below in abbreviated form.
422 backtrack_masked.append((pkg, parent_atoms))
425 writemsg("\n!!! The following update has been skipped " + \
426 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
428 writemsg(str(pkg.slot_atom), noiselevel=-1)
430 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
431 writemsg("\n", noiselevel=-1)
433 for parent, root, atom in parent_atoms:
434 self._show_unsatisfied_dep(root, atom, myparent=parent)
435 writemsg("\n", noiselevel=-1)
438 # These are shown in abbreviated form, in order to avoid terminal
439 # flooding from mask messages as reported in bug #285832.
440 writemsg("\n!!! The following update(s) have been skipped " + \
441 "due to unsatisfied dependencies\n" + \
442 "!!! triggered by backtracking:\n\n", noiselevel=-1)
443 for pkg, parent_atoms in backtrack_masked:
444 writemsg(str(pkg.slot_atom), noiselevel=-1)
446 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
447 writemsg("\n", noiselevel=-1)
449 def _show_missed_update_slot_conflicts(self, missed_updates):
451 if not missed_updates:
455 msg.append("\n!!! One or more updates have been skipped due to " + \
456 "a dependency conflict:\n\n")
459 for pkg, parent_atoms in missed_updates:
460 msg.append(str(pkg.slot_atom))
462 msg.append(" for %s" % (pkg.root,))
465 for parent, atom in parent_atoms:
469 msg.append(" conflicts with\n")
471 if isinstance(parent,
472 (PackageArg, AtomArg)):
473 # For PackageArg and AtomArg types, it's
474 # redundant to display the atom attribute.
475 msg.append(str(parent))
477 # Display the specific atom from SetArg or
479 msg.append("%s required by %s" % (atom, parent))
483 writemsg("".join(msg), noiselevel=-1)
485 def _show_slot_collision_notice(self):
486 """Show an informational message advising the user to mask one of the
487 the packages. In some cases it may be possible to resolve this
488 automatically, but support for backtracking (removal nodes that have
489 already been selected) will be required in order to handle all possible
493 if not self._dynamic_config._slot_collision_info:
496 self._show_merge_list()
498 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
499 handler = self._dynamic_config._slot_conflict_handler
501 conflict = handler.get_conflict()
502 writemsg(conflict, noiselevel=-1)
504 explanation = handler.get_explanation()
506 writemsg(explanation, noiselevel=-1)
509 if "--quiet" in self._frozen_config.myopts:
513 msg.append("It may be possible to solve this problem ")
514 msg.append("by using package.mask to prevent one of ")
515 msg.append("those packages from being selected. ")
516 msg.append("However, it is also possible that conflicting ")
517 msg.append("dependencies exist such that they are impossible to ")
518 msg.append("satisfy simultaneously. If such a conflict exists in ")
519 msg.append("the dependencies of two different packages, then those ")
520 msg.append("packages can not be installed simultaneously.")
521 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
522 if not self._dynamic_config._allow_backtracking and \
523 (backtrack_opt is None or \
524 (backtrack_opt > 0 and backtrack_opt < 30)):
525 msg.append(" You may want to try a larger value of the ")
526 msg.append("--backtrack option, such as --backtrack=30, ")
527 msg.append("in order to see if that will solve this conflict ")
528 msg.append("automatically.")
530 for line in textwrap.wrap(''.join(msg), 70):
531 writemsg(line + '\n', noiselevel=-1)
532 writemsg('\n', noiselevel=-1)
535 msg.append("For more information, see MASKED PACKAGES ")
536 msg.append("section in the emerge man page or refer ")
537 msg.append("to the Gentoo Handbook.")
538 for line in textwrap.wrap(''.join(msg), 70):
539 writemsg(line + '\n', noiselevel=-1)
540 writemsg('\n', noiselevel=-1)
542 def _process_slot_conflicts(self):
544 Process slot conflict data to identify specific atoms which
545 lead to conflict. These atoms only match a subset of the
546 packages that have been pulled into a given slot.
548 for (slot_atom, root), slot_nodes \
549 in self._dynamic_config._slot_collision_info.items():
551 all_parent_atoms = set()
552 for pkg in slot_nodes:
553 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
556 all_parent_atoms.update(parent_atoms)
558 for pkg in slot_nodes:
559 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
560 if parent_atoms is None:
562 self._dynamic_config._parent_atoms[pkg] = parent_atoms
563 for parent_atom in all_parent_atoms:
564 if parent_atom in parent_atoms:
566 # Use package set for matching since it will match via
567 # PROVIDE when necessary, while match_from_list does not.
568 parent, atom = parent_atom
569 atom_set = InternalPackageSet(
570 initial_atoms=(atom,))
571 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
572 parent_atoms.add(parent_atom)
574 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
576 def _reinstall_for_flags(self, forced_flags,
577 orig_use, orig_iuse, cur_use, cur_iuse):
578 """Return a set of flags that trigger reinstallation, or None if there
579 are no such flags."""
580 if "--newuse" in self._frozen_config.myopts or \
581 "--binpkg-respect-use" in self._frozen_config.myopts:
582 flags = set(orig_iuse.symmetric_difference(
583 cur_iuse).difference(forced_flags))
584 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
585 cur_iuse.intersection(cur_use)))
588 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
589 flags = orig_iuse.intersection(orig_use).symmetric_difference(
590 cur_iuse.intersection(cur_use))
595 def _create_graph(self, allow_unsatisfied=False):
596 dep_stack = self._dynamic_config._dep_stack
597 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
598 while dep_stack or dep_disjunctive_stack:
599 self._spinner_update()
601 dep = dep_stack.pop()
602 if isinstance(dep, Package):
603 if not self._add_pkg_deps(dep,
604 allow_unsatisfied=allow_unsatisfied):
607 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
609 if dep_disjunctive_stack:
610 if not self._pop_disjunction(allow_unsatisfied):
614 def _expand_set_args(self, input_args, add_to_digraph=False):
616 Iterate over a list of DependencyArg instances and yield all
617 instances given in the input together with additional SetArg
618 instances that are generated from nested sets.
619 @param input_args: An iterable of DependencyArg instances
620 @type input_args: Iterable
621 @param add_to_digraph: If True then add SetArg instances
622 to the digraph, in order to record parent -> child
623 relationships from nested sets
624 @type add_to_digraph: Boolean
626 @returns: All args given in the input together with additional
627 SetArg instances that are generated from nested sets
630 traversed_set_args = set()
632 for arg in input_args:
633 if not isinstance(arg, SetArg):
637 root_config = arg.root_config
638 depgraph_sets = self._dynamic_config.sets[root_config.root]
641 arg = arg_stack.pop()
642 if arg in traversed_set_args:
644 traversed_set_args.add(arg)
647 # Traverse nested sets and add them to the stack
648 # if they're not already in the graph. Also, graph
649 # edges between parent and nested sets.
650 for token in arg.pset.getNonAtoms():
651 if not token.startswith(SETPREFIX):
653 s = token[len(SETPREFIX):]
654 nested_set = depgraph_sets.sets.get(s)
655 if nested_set is None:
656 nested_set = root_config.sets.get(s)
657 if nested_set is not None:
658 nested_arg = SetArg(arg=token, pset=nested_set,
659 root_config=root_config)
660 arg_stack.append(nested_arg)
662 self._dynamic_config.digraph.add(nested_arg, arg,
663 priority=BlockerDepPriority.instance)
664 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
666 def _add_dep(self, dep, allow_unsatisfied=False):
667 debug = "--debug" in self._frozen_config.myopts
668 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
669 nodeps = "--nodeps" in self._frozen_config.myopts
670 deep = self._dynamic_config.myparams.get("deep", 0)
671 recurse = deep is True or dep.depth <= deep
673 if not buildpkgonly and \
675 dep.parent not in self._dynamic_config._slot_collision_nodes:
676 if dep.parent.onlydeps:
677 # It's safe to ignore blockers if the
678 # parent is an --onlydeps node.
680 # The blocker applies to the root where
681 # the parent is or will be installed.
682 blocker = Blocker(atom=dep.atom,
683 eapi=dep.parent.metadata["EAPI"],
684 priority=dep.priority, root=dep.parent.root)
685 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
688 if dep.child is None:
689 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
690 onlydeps=dep.onlydeps)
692 # The caller has selected a specific package
693 # via self._minimize_packages().
695 existing_node = self._dynamic_config._slot_pkg_map[
696 dep.root].get(dep_pkg.slot_atom)
699 if dep.priority.optional:
700 # This could be an unnecessary build-time dep
701 # pulled in by --with-bdeps=y.
703 if allow_unsatisfied:
704 self._dynamic_config._unsatisfied_deps.append(dep)
706 self._dynamic_config._unsatisfied_deps_for_display.append(
707 ((dep.root, dep.atom), {"myparent":dep.parent}))
709 # The parent node should not already be in
710 # runtime_pkg_mask, since that would trigger an
711 # infinite backtracking loop.
712 if self._dynamic_config._allow_backtracking:
713 if dep.parent in self._dynamic_config._runtime_pkg_mask:
714 if "--debug" in self._frozen_config.myopts:
716 "!!! backtracking loop detected: %s %s\n" % \
718 self._dynamic_config._runtime_pkg_mask[
719 dep.parent]), noiselevel=-1)
721 # Do not backtrack if only USE have to be changed in
722 # order to satisfy the dependency.
723 dep_pkg, existing_node = \
724 self._select_package(dep.root, dep.atom.without_use,
725 onlydeps=dep.onlydeps)
727 self._dynamic_config._runtime_pkg_mask.setdefault(
728 dep.parent, {})["missing dependency"] = \
729 set([(dep.parent, dep.root, dep.atom)])
730 self._dynamic_config._need_restart = True
731 if "--debug" in self._frozen_config.myopts:
735 msg.append("backtracking due to unsatisfied dep:")
736 msg.append(" parent: %s" % dep.parent)
737 msg.append(" priority: %s" % dep.priority)
738 msg.append(" root: %s" % dep.root)
739 msg.append(" atom: %s" % dep.atom)
741 writemsg_level("".join("%s\n" % l for l in msg),
742 noiselevel=-1, level=logging.DEBUG)
746 if not self._add_pkg(dep_pkg, dep):
750 def _check_slot_conflict(self, pkg, atom):
751 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
754 matches = pkg.cpv == existing_node.cpv
755 if pkg != existing_node and \
757 # Use package set for matching since it will match via
758 # PROVIDE when necessary, while match_from_list does not.
759 matches = bool(InternalPackageSet(initial_atoms=(atom,),
760 ).findAtomForPackage(existing_node,
761 modified_use=self._pkg_use_enabled(existing_node)))
763 return (existing_node, matches)
765 def _add_pkg(self, pkg, dep):
772 myparent = dep.parent
773 priority = dep.priority
776 priority = DepPriority()
778 Fills the digraph with nodes comprised of packages to merge.
779 mybigkey is the package spec of the package to merge.
780 myparent is the package depending on mybigkey ( or None )
781 addme = Should we add this package to the digraph or are we just looking at it's deps?
782 Think --onlydeps, we need to ignore packages in that case.
785 #IUSE-aware emerge -> USE DEP aware depgraph
786 #"no downgrade" emerge
788 # Ensure that the dependencies of the same package
789 # are never processed more than once.
790 previously_added = pkg in self._dynamic_config.digraph
792 # select the correct /var database that we'll be checking against
793 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
794 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
799 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
800 except portage.exception.InvalidDependString as e:
801 if not pkg.installed:
802 # should have been masked before it was selected
806 # NOTE: REQUIRED_USE checks are delayed until after
807 # package selection, since we want to prompt the user
808 # for USE adjustment rather than have REQUIRED_USE
809 # affect package selection and || dep choices.
810 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
811 eapi_has_required_use(pkg.metadata["EAPI"]):
812 required_use_is_sat = check_required_use(
813 pkg.metadata["REQUIRED_USE"],
814 self._pkg_use_enabled(pkg),
815 pkg.iuse.is_valid_flag)
816 if not required_use_is_sat:
817 if dep.atom is not None and dep.parent is not None:
818 self._add_parent_atom(pkg, (dep.parent, dep.atom))
821 for parent_atom in arg_atoms:
822 parent, atom = parent_atom
823 self._add_parent_atom(pkg, parent_atom)
827 atom = Atom("=" + pkg.cpv)
828 self._dynamic_config._unsatisfied_deps_for_display.append(
829 ((pkg.root, atom), {"myparent":dep.parent}))
830 self._dynamic_config._skip_restart = True
835 existing_node, existing_node_matches = \
836 self._check_slot_conflict(pkg, dep.atom)
837 slot_collision = False
839 if existing_node_matches:
840 # The existing node can be reused.
842 for parent_atom in arg_atoms:
843 parent, atom = parent_atom
844 self._dynamic_config.digraph.add(existing_node, parent,
846 self._add_parent_atom(existing_node, parent_atom)
847 # If a direct circular dependency is not an unsatisfied
848 # buildtime dependency then drop it here since otherwise
849 # it can skew the merge order calculation in an unwanted
851 if existing_node != myparent or \
852 (priority.buildtime and not priority.satisfied):
853 self._dynamic_config.digraph.addnode(existing_node, myparent,
855 if dep.atom is not None and dep.parent is not None:
856 self._add_parent_atom(existing_node,
857 (dep.parent, dep.atom))
860 # A slot conflict has occurred.
861 # The existing node should not already be in
862 # runtime_pkg_mask, since that would trigger an
863 # infinite backtracking loop.
864 if self._dynamic_config._allow_backtracking and \
866 self._dynamic_config._runtime_pkg_mask:
867 if "--debug" in self._frozen_config.myopts:
869 "!!! backtracking loop detected: %s %s\n" % \
871 self._dynamic_config._runtime_pkg_mask[
872 existing_node]), noiselevel=-1)
873 elif self._dynamic_config._allow_backtracking and \
874 not self._accept_blocker_conflicts():
875 self._add_slot_conflict(pkg)
876 if dep.atom is not None and dep.parent is not None:
877 self._add_parent_atom(pkg, (dep.parent, dep.atom))
879 for parent_atom in arg_atoms:
880 parent, atom = parent_atom
881 self._add_parent_atom(pkg, parent_atom)
882 self._process_slot_conflicts()
887 # The ordering of backtrack_data can make
888 # a difference here, because both mask actions may lead
889 # to valid, but different, solutions and the one with
890 # 'existing_node' masked is usually the better one. Because
891 # of that, we choose an order such that
892 # the backtracker will first explore the choice with
893 # existing_node masked. The backtracker reverses the
894 # order, so the order it uses is the reverse of the
895 # order shown here. See bug #339606.
896 for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
897 # For missed update messages, find out which
898 # atoms matched to_be_selected that did not
899 # match to_be_masked.
901 self._dynamic_config._parent_atoms.get(to_be_selected, set())
903 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
905 parent_atoms = conflict_atoms
907 all_parents.update(parent_atoms)
910 for parent, atom in parent_atoms:
911 i = InternalPackageSet(initial_atoms=(atom,))
912 if not i.findAtomForPackage(to_be_masked):
916 if to_be_selected >= to_be_masked:
917 # We only care about the parent atoms
918 # when they trigger a downgrade.
921 fallback_data.append((to_be_masked, parent_atoms))
924 # 'to_be_masked' does not violate any parent atom, which means
925 # there is no point in masking it.
928 backtrack_data.append((to_be_masked, parent_atoms))
930 if not backtrack_data:
931 # This shouldn't happen, but fall back to the old
932 # behavior if this gets triggered somehow.
933 backtrack_data = fallback_data
935 if len(backtrack_data) > 1:
936 # NOTE: Generally, we prefer to mask the higher
937 # version since this solves common cases in which a
938 # lower version is needed so that all dependencies
939 # will be satisfied (bug #337178). However, if
940 # existing_node happens to be installed then we
941 # mask that since this is a common case that is
942 # triggered when --update is not enabled.
943 if existing_node.installed:
945 elif pkg > existing_node:
946 backtrack_data.reverse()
948 to_be_masked, parent_atoms = backtrack_data[-1]
950 self._dynamic_config._runtime_pkg_mask.setdefault(
951 to_be_masked, {})["slot conflict"] = parent_atoms
952 self._dynamic_config._need_restart = True
953 if "--debug" in self._frozen_config.myopts:
957 msg.append("backtracking due to slot conflict:")
958 if backtrack_data is fallback_data:
959 msg.append("!!! backtrack_data fallback")
960 msg.append(" first package: %s" % existing_node)
961 msg.append(" second package: %s" % pkg)
962 msg.append(" package to mask: %s" % to_be_masked)
963 msg.append(" slot: %s" % pkg.slot_atom)
964 msg.append(" parents: %s" % ", ".join( \
965 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
967 writemsg_level("".join("%s\n" % l for l in msg),
968 noiselevel=-1, level=logging.DEBUG)
971 # A slot collision has occurred. Sometimes this coincides
972 # with unresolvable blockers, so the slot collision will be
973 # shown later if there are no unresolvable blockers.
974 self._add_slot_conflict(pkg)
975 slot_collision = True
978 # Now add this node to the graph so that self.display()
979 # can show use flags and --tree portage.output. This node is
980 # only being partially added to the graph. It must not be
981 # allowed to interfere with the other nodes that have been
982 # added. Do not overwrite data for existing nodes in
983 # self._dynamic_config.mydbapi since that data will be used for blocker
985 # Even though the graph is now invalid, continue to process
986 # dependencies so that things like --fetchonly can still
987 # function despite collisions.
989 elif not previously_added:
990 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
991 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
992 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
993 self._dynamic_config._highest_pkg_cache.clear()
994 self._check_masks(pkg)
996 if not pkg.installed:
997 # Allow this package to satisfy old-style virtuals in case it
998 # doesn't already. Any pre-existing providers will be preferred
1001 pkgsettings.setinst(pkg.cpv, pkg.metadata)
1002 # For consistency, also update the global virtuals.
1003 settings = self._frozen_config.roots[pkg.root].settings
1005 settings.setinst(pkg.cpv, pkg.metadata)
1007 except portage.exception.InvalidDependString as e:
1008 if not pkg.installed:
1009 # should have been masked before it was selected
1013 self._dynamic_config._set_nodes.add(pkg)
1015 # Do this even when addme is False (--onlydeps) so that the
1016 # parent/child relationship is always known in case
1017 # self._show_slot_collision_notice() needs to be called later.
1018 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1019 if dep.atom is not None and dep.parent is not None:
1020 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1023 for parent_atom in arg_atoms:
1024 parent, atom = parent_atom
1025 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1026 self._add_parent_atom(pkg, parent_atom)
1028 """ This section determines whether we go deeper into dependencies or not.
1029 We want to go deeper on a few occasions:
1030 Installing package A, we need to make sure package A's deps are met.
1031 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1032 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1037 deep = self._dynamic_config.myparams.get("deep", 0)
1038 recurse = deep is True or depth + 1 <= deep
1039 dep_stack = self._dynamic_config._dep_stack
1040 if "recurse" not in self._dynamic_config.myparams:
1042 elif pkg.installed and not recurse:
1043 dep_stack = self._dynamic_config._ignored_deps
1045 self._spinner_update()
1047 if not previously_added:
1048 dep_stack.append(pkg)
def _check_masks(self, pkg):
    """Record a masked-by-LICENSE upgrade that pkg's selection hides.

    If a newer version in the same (root, slot) is masked only because
    of a LICENSE change, remember it so a notice can be shown later.
    """
    slot_key = (pkg.root, pkg.slot_atom)
    # Check for upgrades in the same slot that are
    # masked due to a LICENSE change in a newer
    # version that is not masked for any other reason.
    masked_upgrade = self._frozen_config._highest_license_masked.get(slot_key)
    if masked_upgrade is not None and pkg < masked_upgrade:
        self._dynamic_config._masked_license_updates.add(masked_upgrade)
def _add_parent_atom(self, pkg, parent_atom):
    """Register a (parent package, atom) pair as a reason pkg is in the graph."""
    # setdefault inserts a fresh set the first time pkg is seen,
    # exactly like the explicit None-check it replaces.
    self._dynamic_config._parent_atoms.setdefault(pkg, set()).add(parent_atom)
def _add_slot_conflict(self, pkg):
    """Record pkg as a participant in a slot conflict.

    Adds pkg to the global set of conflicting nodes and to the
    per-(slot_atom, root) entry of _slot_collision_info, seeding that
    entry with the package currently occupying the slot in the graph
    the first time a conflict is seen for the slot.
    """
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # Fix: slot_nodes must be created before .add() is called on it;
        # the previous code invoked .add() on None here.
        slot_nodes = set()
        slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
    # Record the newly conflicting package in the per-slot set as well.
    slot_nodes.add(pkg)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Collect pkg's DEPEND/RDEPEND/PDEPEND strings, adjust them for the
    # active options (--buildpkgonly, --with-bdeps, --root-deps, removal
    # actions), then feed each one to _add_pkg_dep_string so the
    # resulting dependencies are added to the graph.
    # NOTE(review): several lines appear elided in this chunk (e.g. the
    # initialization of edepend/myroot/deps and the try: statements that
    # pair with the except clauses below) -- verify against the full file.

    mytype = pkg.type_name
    metadata = pkg.metadata
    # USE flags in effect for pkg (autounmask may alter the recorded USE)
    myuse = self._pkg_use_enabled(pkg)
    depth = pkg.depth + 1
    removal_action = "remove" in self._dynamic_config.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        # --buildpkgonly without "deep": runtime deps are not needed.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    if pkg.built and not removal_action:
        if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # failing.
            # (the branch between these comments appears elided)
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    depend_root = myroot
    # --root-deps controls which ROOT build-time deps apply to.
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""

    # (deps sequence construction; the enclosing assignment appears elided)
    (depend_root, edepend["DEPEND"],
        self._priority(buildtime=(not pkg.built),
        optional=pkg.built),
    (myroot, edepend["RDEPEND"],
        self._priority(runtime=True),
    (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True),

    debug = "--debug" in self._frozen_config.myopts
    strict = mytype != "installed"

    for dep_root, dep_string, dep_priority, ignore_blockers in deps:
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # Evaluate USE conditionals; flag validation is strict for
        # packages that are not already installed.
        dep_string = portage.dep.use_reduce(dep_string,
            uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
                # Try again, but omit the is_valid_flag argument, since
                # invalid USE conditionals are a common problem and it's
                # practical to ignore this issue for installed packages.
                dep_string = portage.dep.use_reduce(dep_string,
                    uselist=self._pkg_use_enabled(pkg))
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

        # Defer disjunctive (virtual and ||) deps; keep the rest.
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)
            # should have been masked before it was selected

        # Re-serialize, preserving unevaluated atoms for later display.
        dep_string = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True)

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied, ignore_blockers=ignore_blockers):

    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied, ignore_blockers=False):
    """Process one dependency string of pkg via _wrapped_add_pkg_dep_string.

    Autounmask is temporarily disabled while optional-priority deps are
    processed, since those deps don't necessarily need to be satisfied.
    Returns whatever the wrapped call returns.
    """
    _autounmask_backup = self._dynamic_config._autounmask
    if dep_priority.optional:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    try:
        return self._wrapped_add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied, ignore_blockers=ignore_blockers)
    finally:
        # Fix: the restore used to sit after the unconditional return,
        # so it never ran; a finally clause guarantees it executes on
        # return and on exceptions alike.
        self._dynamic_config._autounmask = _autounmask_backup
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
    dep_string, allow_unsatisfied, ignore_blockers=False):
    # Resolve dep_string into concrete atoms/packages via _select_atoms
    # and add a Dependency edge for each one; indirect virtual deps are
    # expanded afterwards so actual parent/child relationships are kept.
    # NOTE(review): lines appear elided in this chunk (try:/if debug:
    # statements, early returns and loop-control statements); verify
    # against the full file.
    depth = pkg.depth + 1
    deep = self._dynamic_config.myparams.get("deep", 0)
    # Whether deps already satisfied by an installed package should
    # still be traversed at this depth.
    recurse_satisfied = deep is True or depth <= deep
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"

    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)

    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString as e:
        self._dynamic_config._masked_installed.add(pkg)
        # should have been masked before it was selected

    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    # Virtual children whose own deps have been handled below.
    traversed_virt_pkgs = set()

    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        # If this was a specially generated virtual atom
        # from dep_check, map it back to the original, in
        # order to avoid distortion in places like display
        # or conflict resolution code.
        is_virt = hasattr(atom, '_orig_atom')
        atom = getattr(atom, '_orig_atom', atom)

        if ignore_blockers and atom.blocker:
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.

        mypriority = dep_priority.copy()
        if not atom.blocker:
            inst_pkgs = vardb.match_pkgs(atom)
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]

        dep = Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root)

        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if not atom.blocker and \
            not recurse_satisfied and \
            mypriority.satisfied and \
            mypriority.satisfied.visible and \
            dep.child is not None and \
            not dep.child.installed and \
            self._dynamic_config._slot_pkg_map[dep.child.root].get(
            dep.child.slot_atom) is None:
            if dep.root == self._frozen_config.target_root:
                myarg = next(self._iter_atoms_for_pkg(dep.child))
                except StopIteration:
                except InvalidDependString:
                    if not dep.child.installed:
                        # This shouldn't happen since the package
                        # should have been masked.

            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
            self._dynamic_config._ignored_deps.append(dep)

        if not self._add_dep(dep,
            allow_unsatisfied=allow_unsatisfied):
        if is_virt and dep.child is not None:
            traversed_virt_pkgs.add(dep.child)

    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_dep, atoms in selected_atoms.items():
        virt_pkg = virt_dep.child
        if virt_pkg not in traversed_virt_pkgs:

        writemsg_level("Candidates: %s: %s\n" % \
            (virt_pkg.cpv, [str(x) for x in atoms]),
            noiselevel=-1, level=logging.DEBUG)

        if not self._add_pkg(virt_pkg, virt_dep):

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):

            # If this was a specially generated virtual atom
            # from dep_check, map it back to the original, in
            # order to avoid distortion in places like display
            # or conflict resolution code.
            is_virt = hasattr(atom, '_orig_atom')
            atom = getattr(atom, '_orig_atom', atom)

            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = vardb.match_pkgs(atom)
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

            # Dependencies of virtuals are considered to have the
            # same depth as the virtual itself.
            dep = Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_dep.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root)

            if not atom.blocker and \
                not recurse_satisfied and \
                mypriority.satisfied and \
                mypriority.satisfied.visible and \
                dep.child is not None and \
                not dep.child.installed and \
                self._dynamic_config._slot_pkg_map[dep.child.root].get(
                dep.child.slot_atom) is None:
                if dep.root == self._frozen_config.target_root:
                    myarg = next(self._iter_atoms_for_pkg(dep.child))
                    except StopIteration:
                    except InvalidDependString:
                        if not dep.child.installed:

                self._dynamic_config._ignored_deps.append(dep)

            if not self._add_dep(dep,
                allow_unsatisfied=allow_unsatisfied):
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)

    writemsg_level("Exiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    overlapping sets of packages.
    """
    # NOTE(review): map/list initializations and several loop and branch
    # lines appear elided in this chunk; verify against the full file.

    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg

    if len(atom_pkg_map) < 2:
        # Fewer than two selections: nothing to minimize.
        for item in atom_pkg_map.items():

    # Group atoms by selected package and packages by category/package.
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for cp, pkgs in cp_pkg_map.items():
        for atom in pkg_atom_map[pkg]:

        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
        for atom in pkg_atom_map[pkg1]:
            atom_pkg_graph.add(pkg1, atom)
            atom_set = InternalPackageSet(initial_atoms=(atom,))
            if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
                atom_pkg_graph.add(pkg2, atom)

        # A package is redundant when every atom it satisfies is also
        # satisfied by at least one other selected package.
        eliminate_pkg = True
        for atom in atom_pkg_graph.parent_nodes(pkg):
            if len(atom_pkg_graph.child_nodes(atom)) < 2:
                eliminate_pkg = False
        atom_pkg_graph.remove(pkg)

        # Yield ~, =*, < and <= atoms first, since those are more likely to
        # cause slot conflicts, and we want those atoms to be displayed
        # in the resulting slot conflict message (see bug #291142).
        for atom in cp_atoms:
            for child_pkg in atom_pkg_graph.child_nodes(atom):
                existing_node, matches = \
                    self._check_slot_conflict(child_pkg, atom)
                if existing_node and not matches:
                    conflict_atoms.append(atom)
                    normal_atoms.append(atom)

        for atom in chain(conflict_atoms, normal_atoms):
            child_pkgs = atom_pkg_graph.child_nodes(atom)
            # if more than one child, yield highest version
            if len(child_pkgs) > 1:
                yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when an
    invalid atom is encountered for a non-installed package.
    """
    # NOTE(review): the index initialization, yield statements, try:
    # lines and index advancement appear elided in this chunk; verify
    # against the full file.
    while i < len(dep_struct):
        if isinstance(x, list):
            # Nested structure: recurse and re-yield its plain deps.
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
            # "||" operator: defer the whole choice block for later.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ x, dep_struct[ i + 1 ] ] )
            x = portage.dep.Atom(x)
            except portage.exception.InvalidAtom:
                # Installed packages tolerate invalid atoms;
                # everything else must have been masked earlier.
                if not pkg.installed:
                    raise portage.exception.InvalidDependString(
                        "invalid atom: '%s'" % x)
            # Note: Eventually this will check for PROPERTIES=virtual
            # or whatever other metadata gets implemented for this
            # purpose.
            if x.cp.startswith('virtual/'):
                self._queue_disjunction( pkg, dep_root,
                    dep_priority, [ str(x) ] )
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
    """Defer a disjunctive dep for later handling by _pop_disjunction."""
    entry = (pkg, dep_root, dep_priority, dep_struct)
    self._dynamic_config._dep_disjunctive_stack.append(entry)
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    """
    # NOTE(review): the failure-return path after the if below appears
    # elided in this chunk.
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    # Re-serialize the structure, keeping unevaluated atoms intact.
    dep_string = portage.dep.paren_enclose(dep_struct,
        unevaluated_atom=True)
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
    """Construct a dep priority appropriate for the current action.

    Uses UnmergeDepPriority during a "remove" action and DepPriority
    otherwise; kwargs are forwarded to the chosen constructor.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Fix: without this else, DepPriority unconditionally clobbered
        # the UnmergeDepPriority choice made just above.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category into atoms with each candidate
    category filled in.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): the categories/deps container initialization and the
    # return statement appear elided in this chunk.
    # Build a throwaway "null/..." key just to split out the package name.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    # Collect every category that provides a package named atom_pn.
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):

    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat)))
def _have_new_virt(self, root, atom_cp):
    # Whether atom_cp is provided by a new-style virtual package in any
    # of root's configured databases.
    # NOTE(review): the result flag assignment and return statement
    # appear elided in this chunk.
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Yield each command-line/set atom that applies to pkg, skipping
    # atoms superseded by a new-style virtual or by a higher version
    # available in a different slot.
    # NOTE(review): several loop-control lines (continue/yield and the
    # visible_pkgs/higher_slot assignments) appear elided in this chunk;
    # verify against the full file.
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    root_config = self._frozen_config.roots[pkg.root]
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A higher version exists in another slot.
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): many lines of this method appear elided in this
    # chunk (container initialization such as args/myfavorites/
    # lookup_owners, the main "for x in myfiles" loop header, else:
    # branches and early returns); the indentation below is
    # reconstructed and must be verified against the full file.
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    myroot = self._frozen_config.target_root
    dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
    vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self._frozen_config.myopts

    # Classify each argument: binary package, ebuild path, owned file
    # path, package set, or plain atom.
    ext = os.path.splitext(x)[1]
    # --- .tbz2 binary package arguments ---
    if not os.path.exists(x):
        # Fall back to searching PKGDIR.
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
    writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
    return 0, myfavorites
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
        writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
        self._dynamic_config._skip_restart = True
        return 0, myfavorites

    pkg = self._pkg(mykey, "binary", root_config,
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    # --- .ebuild file arguments ---
    elif ext==".ebuild":
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        # Pre-built error, raised when the path doesn't look like a tree.
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
            self._dynamic_config._skip_restart = True
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.cpv_getkey(mykey)):
            # Warn (with a countdown delay) before emerging a masked pkg.
            writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
            countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        pkg = self._pkg(mykey, "ebuild", root_config,
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    # --- absolute paths: resolved below to their owning packages ---
    elif x.startswith(os.path.sep):
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
            self._dynamic_config._skip_restart = True
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    # --- relative paths: normalized then resolved to owners ---
    elif x.startswith("." + os.sep) or \
        x.startswith(".." + os.sep):
        f = os.path.abspath(x)
        if not f.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
                " $ROOT.\n") % (f, x), noiselevel=-1)
            self._dynamic_config._skip_restart = True
        lookup_owners.append(f)
        # --- sets and atoms ---
        if x in ("system", "world"):
        if x.startswith(SETPREFIX):
            s = x[len(SETPREFIX):]
            raise portage.exception.PackageSetNotFound(s)
            if s in depgraph_sets.sets:
            depgraph_sets.sets[s] = pset
            args.append(SetArg(arg=x, pset=pset,
                root_config=root_config))
        if not is_valid_package_atom(x):
            portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
            portage.writemsg("!!! Please check ebuild(5) for full details.\n")
            portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
            self._dynamic_config._skip_restart = True
        # Don't expand categories or old-style virtuals here unless
        # necessary. Expansion of old-style virtuals here causes at
        # least the following problems:
        #   1) It's more difficult to determine which set(s) an atom
        #      came from, if any.
        #   2) It takes away freedom from the resolver to choose other
        #      possible expansions when necessary.
        args.append(AtomArg(arg=x, atom=Atom(x),
            root_config=root_config))
        # Category-less atom: expand to candidate categories.
        expanded_atoms = self._dep_expand(root_config, x)
        installed_cp_set = set()
        for atom in expanded_atoms:
            if vardb.cp_list(atom.cp):
                installed_cp_set.add(atom.cp)

        if len(installed_cp_set) > 1:
            # Prefer the single non-virtual category when one exists.
            non_virtual_cps = set()
            for atom_cp in installed_cp_set:
                if not atom_cp.startswith("virtual/"):
                    non_virtual_cps.add(atom_cp)
            if len(non_virtual_cps) == 1:
                installed_cp_set = non_virtual_cps

        if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
            installed_cp = next(iter(installed_cp_set))
            for atom in expanded_atoms:
                if atom.cp == installed_cp:
                    for pkg in self._iter_match_pkgs_any(
                        root_config, atom.without_use,
                        if not pkg.installed:
                    expanded_atoms = [atom]

        # If a non-virtual package and one or more virtual packages
        # are in expanded_atoms, use the non-virtual package.
        if len(expanded_atoms) > 1:
            number_of_virtuals = 0
            for expanded_atom in expanded_atoms:
                if expanded_atom.cp.startswith("virtual/"):
                    number_of_virtuals += 1
                    candidate = expanded_atom
            if len(expanded_atoms) - number_of_virtuals == 1:
                expanded_atoms = [ candidate ]

        if len(expanded_atoms) > 1:
            # Still ambiguous: report and bail out.
            writemsg("\n\n", noiselevel=-1)
            ambiguous_package_name(x, expanded_atoms, root_config,
                self._frozen_config.spinner, self._frozen_config.myopts)
            self._dynamic_config._skip_restart = True
            return False, myfavorites
        atom = expanded_atoms[0]
        # No match: maybe it's an old-style virtual name.
        null_atom = Atom(insert_category_into_atom(x, "null"))
        cat, atom_pn = portage.catsplit(null_atom.cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the depgraph to choose which virtual.
        atom = Atom(null_atom.replace('null/', 'virtual/', 1))
        args.append(AtomArg(arg=x, atom=atom,
            root_config=root_config))

    # Resolve queued file paths to the packages that own them.
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True

    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot)-1:])

    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:

    portage.writemsg(("\n\n!!! '%s' is not claimed " + \
        "by any package.\n") % lookup_owners[0], noiselevel=-1)
    self._dynamic_config._skip_restart = True

    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = Atom(portage.cpv_getkey(cpv))
    atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))

    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms

        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))

        self._set_args(greedy_args)

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Favorites are the atoms/sets the user asked for by name.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]

    return self._resolve(myfavorites)
# Pull the root nodes (command-line arguments and expanded sets) into the
# dependency graph, then process their dependencies via _create_graph().
# Returns a 2-tuple (success, myfavorites); a false first element means
# resolution failed (unsatisfied dep, masked pkg, or needed config change).
# NOTE(review): this excerpt is truncated -- the embedded original line
# numbers skip values, so try/else headers and other statements are elided
# below; the text does not parse as-is and must be read accordingly.
1917 def _resolve(self, myfavorites):
1918 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1919 call self._creategraph to process theier deps and return
1921 debug = "--debug" in self._frozen_config.myopts
1922 onlydeps = "--onlydeps" in self._frozen_config.myopts
1923 myroot = self._frozen_config.target_root
1924 pkgsettings = self._frozen_config.pkgsettings[myroot]
1925 pprovideddict = pkgsettings.pprovideddict
1926 virtuals = pkgsettings.getvirtuals()
# Walk every argument (sets expanded into the digraph) and every atom
# each argument's package set provides.
1927 for arg in self._expand_set_args(
1928 self._dynamic_config._initial_arg_list,
1929 add_to_digraph=True):
1930 for atom in arg.pset.getAtoms():
1931 self._spinner_update()
1932 dep = Dependency(atom=atom, onlydeps=onlydeps,
1933 root=myroot, parent=arg)
# package.provided handling: if the atom is satisfied by a
# user-declared provided package, record it for later display.
1935 pprovided = pprovideddict.get(atom.cp)
1936 if pprovided and portage.match_from_list(atom, pprovided):
1937 # A provided package has been specified on the command line.
1938 self._dynamic_config._pprovided_args.append((arg, atom))
1940 if isinstance(arg, PackageArg):
1941 if not self._add_pkg(arg.package, dep) or \
1942 not self._create_graph():
1943 if not self.need_restart():
1944 sys.stderr.write(("\n\n!!! Problem " + \
1945 "resolving dependencies for %s\n") % \
1947 return 0, myfavorites
1950 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1951 (arg, atom), noiselevel=-1)
1952 pkg, existing_node = self._select_package(
1953 myroot, atom, onlydeps=onlydeps)
# No package selected: check whether a new-style virtual
# expansion of the atom is satisfied by package.provided.
1955 pprovided_match = False
1956 for virt_choice in virtuals.get(atom.cp, []):
1957 expanded_atom = portage.dep.Atom(
1958 atom.replace(atom.cp, virt_choice.cp, 1))
1959 pprovided = pprovideddict.get(expanded_atom.cp)
1961 portage.match_from_list(expanded_atom, pprovided):
1962 # A provided package has been
1963 # specified on the command line.
1964 self._dynamic_config._pprovided_args.append((arg, atom))
1965 pprovided_match = True
# Unsatisfied atoms from the special world/system/selected
# sets are tolerated; anything else aborts resolution.
1970 if not (isinstance(arg, SetArg) and \
1971 arg.name in ("selected", "system", "world")):
1972 self._dynamic_config._unsatisfied_deps_for_display.append(
1973 ((myroot, atom), {"myparent" : arg}))
1974 return 0, myfavorites
1976 self._dynamic_config._missing_args.append((arg, atom))
1978 if atom.cp != pkg.cp:
1979 # For old-style virtuals, we need to repeat the
1980 # package.provided check against the selected package.
1981 expanded_atom = atom.replace(atom.cp, pkg.cp)
1982 pprovided = pprovideddict.get(pkg.cp)
1984 portage.match_from_list(expanded_atom, pprovided):
1985 # A provided package has been
1986 # specified on the command line.
1987 self._dynamic_config._pprovided_args.append((arg, atom))
1989 if pkg.installed and "selective" not in self._dynamic_config.myparams:
1990 self._dynamic_config._unsatisfied_deps_for_display.append(
1991 ((myroot, atom), {"myparent" : arg}))
1992 # Previous behavior was to bail out in this case, but
1993 # since the dep is satisfied by the installed package,
1994 # it's more friendly to continue building the graph
1995 # and just show a warning message. Therefore, only bail
1996 # out here if the atom is not from either the system or
1998 if not (isinstance(arg, SetArg) and \
1999 arg.name in ("selected", "system", "world")):
2000 return 0, myfavorites
2002 # Add the selected package to the graph as soon as possible
2003 # so that later dep_check() calls can use it as feedback
2004 # for making more consistent atom selections.
2005 if not self._add_pkg(pkg, dep):
2006 if self.need_restart():
2008 elif isinstance(arg, SetArg):
2009 writemsg(("\n\n!!! Problem resolving " + \
2010 "dependencies for %s from %s\n") % \
2011 (atom, arg.arg), noiselevel=-1)
2013 writemsg(("\n\n!!! Problem resolving " + \
2014 "dependencies for %s\n") % \
2015 (atom,), noiselevel=-1)
2016 return 0, myfavorites
2018 except SystemExit as e:
2019 raise # Needed else can't exit
2020 except Exception as e:
2021 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2022 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2025 # Now that the root packages have been added to the graph,
2026 # process the dependencies.
2027 if not self._create_graph():
2028 return 0, myfavorites
2032 except self._unknown_internal_error:
2033 return False, myfavorites
# autounmask bookkeeping: if any package that ended up in the graph
# still needs an unstable-keyword, USE, or license change, treat the
# resolution as failed so the suggested changes are displayed.
2035 digraph_set = frozenset(self._dynamic_config.digraph)
2037 if digraph_set.intersection(
2038 self._dynamic_config._needed_unstable_keywords) or \
2039 digraph_set.intersection(
2040 self._dynamic_config._needed_use_config_changes) or \
2041 digraph_set.intersection(
2042 self._dynamic_config._needed_license_changes) :
2043 #We failed if the user needs to change the configuration
2044 return False, myfavorites
2048 # We're true here unless we are missing binaries.
2049 return (True, myfavorites)
# Rebuild the per-root "__non_set_args__" package set and the
# atom -> argument mapping from the given arguments. Safe to call
# repeatedly: all derived state is cleared first, and the package
# selection cache is invalidated at the end.
# NOTE(review): embedded original line numbers skip values here, so a few
# statements (e.g. the initialization of set_atoms/non_set_atoms dicts and
# an else branch) are elided from this excerpt.
2051 def _set_args(self, args):
2053 Create the "__non_set_args__" package set from atoms and packages given as
2054 arguments. This method can be called multiple times if necessary.
2055 The package selection cache is automatically invalidated, since
2056 arguments influence package selections.
# Pass 1: reset all per-root derived state before repopulating it.
2061 for root in self._dynamic_config.sets:
2062 depgraph_sets = self._dynamic_config.sets[root]
2063 depgraph_sets.sets.setdefault('__non_set_args__',
2064 InternalPackageSet()).clear()
2065 depgraph_sets.atoms.clear()
2066 depgraph_sets.atom_arg_map.clear()
2067 set_atoms[root] = []
2068 non_set_atoms[root] = []
2070 # We don't add set args to the digraph here since that
2071 # happens at a later stage and we don't want to make
2072 # any state changes here that aren't reversed by a
2073 # another call to this method.
# Pass 2: bucket each argument's atoms by whether it came from a set,
# and record every (atom, root) -> [args] reference.
2074 for arg in self._expand_set_args(args, add_to_digraph=False):
2075 atom_arg_map = self._dynamic_config.sets[
2076 arg.root_config.root].atom_arg_map
2077 if isinstance(arg, SetArg):
2078 atom_group = set_atoms[arg.root_config.root]
2080 atom_group = non_set_atoms[arg.root_config.root]
2082 for atom in arg.pset.getAtoms():
2083 atom_group.append(atom)
2084 atom_key = (atom, arg.root_config.root)
2085 refs = atom_arg_map.get(atom_key)
2088 atom_arg_map[atom_key] = refs
# Pass 3: publish the collected atoms into the per-root depgraph sets.
2092 for root in self._dynamic_config.sets:
2093 depgraph_sets = self._dynamic_config.sets[root]
2094 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2095 non_set_atoms.get(root, [])))
2096 depgraph_sets.sets['__non_set_args__'].update(
2097 non_set_atoms.get(root, []))
2099 # Invalidate the package selection cache, since
2100 # arguments influence package selections.
2101 self._dynamic_config._highest_pkg_cache.clear()
2102 for trees in self._dynamic_config._filtered_trees.values():
2103 trees["porttree"].dbapi._clear_cache()
# Compute slot atoms for installed slots of atom.cp that differ from the
# slot of the highest visible match; with blocker_lookahead=True, drop
# candidates whose DEPEND/RDEPEND/PDEPEND blockers would conflict.
# NOTE(review): docstring quote lines and several statements (early
# returns, some if-headers, list initializations) are elided in this
# excerpt -- the embedded original line numbers skip values.
2105 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2107 Return a list of slot atoms corresponding to installed slots that
2108 differ from the slot of the highest visible match. When
2109 blocker_lookahead is True, slot atoms that would trigger a blocker
2110 conflict are automatically discarded, potentially allowing automatic
2111 uninstallation of older slots when appropriate.
2113 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2114 if highest_pkg is None:
2116 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of installed matches that share highest_pkg's cp
# (guards against mixing new-style virtuals with old-style ones).
2118 for cpv in vardb.match(atom):
2119 # don't mix new virtuals with old virtuals
2120 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2121 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2123 slots.add(highest_pkg.metadata["SLOT"])
2127 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select a package and keep it only when it is
# an older version of the same cp as highest_pkg.
2130 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2131 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2132 if pkg is not None and \
2133 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2134 greedy_pkgs.append(pkg)
2137 if not blocker_lookahead:
2138 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: gather each candidate's blocker atoms from its
# dep strings; packages with invalid deps are silently dropped.
2141 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2142 for pkg in greedy_pkgs + [highest_pkg]:
2143 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2145 selected_atoms = self._select_atoms(
2146 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2147 parent=pkg, strict=True)
2148 except portage.exception.InvalidDependString:
2151 for atoms in selected_atoms.values():
2152 blocker_atoms.extend(x for x in atoms if x.blocker)
2153 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2155 if highest_pkg not in blockers:
2158 # filter packages with invalid deps
2159 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2161 # filter packages that conflict with highest_pkg
2162 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2163 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2164 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2169 # If two packages conflict, discard the lower version.
2170 discard_pkgs = set()
2171 greedy_pkgs.sort(reverse=True)
2172 for i in range(len(greedy_pkgs) - 1):
2173 pkg1 = greedy_pkgs[i]
2174 if pkg1 in discard_pkgs:
2176 for j in range(i + 1, len(greedy_pkgs)):
2177 pkg2 = greedy_pkgs[j]
2178 if pkg2 in discard_pkgs:
2180 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2181 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2183 discard_pkgs.add(pkg2)
2185 return [pkg.slot_atom for pkg in greedy_pkgs \
2186 if pkg not in discard_pkgs]
# Thin wrapper around _select_atoms_highest_available that forces the
# graph-backed trees, so atom selection prefers packages already in the
# graph (or installed and not scheduled for replacement).
# NOTE(review): the docstring's triple-quote lines are elided in this
# excerpt; only its text survives below.
2188 def _select_atoms_from_graph(self, *pargs, **kwargs):
2190 Prefer atoms matching packages that have already been
2191 added to the graph or those that are installed and have
2192 not been scheduled for replacement.
2194 kwargs["trees"] = self._dynamic_config._graph_trees
2195 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a dependency string and map the chosen
# atoms back to their (possibly virtual) parents via the atom_graph
# that dep_check populates. Raises InvalidDependString on bad input.
# NOTE(review): this excerpt elides several lines (try/finally headers,
# some if/else branches) -- the embedded original line numbers skip
# values, so statement pairing below is incomplete as shown.
2197 def _select_atoms_highest_available(self, root, depstring,
2198 myuse=None, parent=None, strict=True, trees=None, priority=None):
2199 """This will raise InvalidDependString if necessary. If trees is
2200 None then self._dynamic_config._filtered_trees is used."""
2202 pkgsettings = self._frozen_config.pkgsettings[root]
2204 trees = self._dynamic_config._filtered_trees
2205 mytrees = trees[root]
2206 atom_graph = digraph()
2208 # Temporarily disable autounmask so that || preferences
2209 # account for masking and USE settings.
2210 _autounmask_backup = self._dynamic_config._autounmask
2211 self._dynamic_config._autounmask = False
2212 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
# Thread parent/atom_graph/priority through the trees dict so that
# dep_check's package selection callbacks can see them; they are
# removed again after the call (cleanup below).
2214 if parent is not None:
2215 trees[root]["parent"] = parent
2216 trees[root]["atom_graph"] = atom_graph
2217 if priority is not None:
2218 trees[root]["priority"] = priority
2219 mycheck = portage.dep_check(depstring, None,
2220 pkgsettings, myuse=myuse,
2221 myroot=root, trees=trees)
# Cleanup: restore autounmask and strip the temporary keys.
2223 self._dynamic_config._autounmask = _autounmask_backup
2224 del mytrees["pkg_use_enabled"]
2225 if parent is not None:
2226 trees[root].pop("parent")
2227 trees[root].pop("atom_graph")
2228 if priority is not None:
2229 trees[root].pop("priority")
2231 raise portage.exception.InvalidDependString(mycheck[1])
2233 selected_atoms = mycheck[1]
2234 elif parent not in atom_graph:
2235 selected_atoms = {parent : mycheck[1]}
2237 # Recursively traversed virtual dependencies, and their
2238 # direct dependencies, are considered to have the same
2239 # depth as direct dependencies.
2240 if parent.depth is None:
2243 virt_depth = parent.depth + 1
# Depth-first walk of atom_graph starting at parent, grouping each
# chosen atom under the Dependency of the virtual that pulled it in.
2244 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2245 selected_atoms = OrderedDict()
2246 node_stack = [(parent, None, None)]
2247 traversed_nodes = set()
2249 node, node_parent, parent_atom = node_stack.pop()
2250 traversed_nodes.add(node)
2254 if node_parent is parent:
2255 if priority is None:
2256 node_priority = None
2258 node_priority = priority.copy()
2260 # virtuals only have runtime deps
2261 node_priority = self._priority(runtime=True)
2263 k = Dependency(atom=parent_atom,
2264 blocker=parent_atom.blocker, child=node,
2265 depth=virt_depth, parent=node_parent,
2266 priority=node_priority, root=node.root)
2269 selected_atoms[k] = child_atoms
2270 for atom_node in atom_graph.child_nodes(node):
2271 child_atom = atom_node[0]
2272 if id(child_atom) not in chosen_atom_ids:
2274 child_atoms.append(child_atom)
2275 for child_node in atom_graph.child_nodes(atom_node):
2276 if child_node in traversed_nodes:
2278 if not portage.match_from_list(
2279 child_atom, [child_node]):
2280 # Typically this means that the atom
2281 # specifies USE deps that are unsatisfied
2282 # by the selected package. The caller will
2283 # record this as an unsatisfied dependency
2286 node_stack.append((child_node, node, child_atom))
2288 return selected_atoms
# Expand a virtual/* atom into the real atoms pulled in by the matching
# virtual package already in the graph, using its RDEPEND. Non-virtual
# atoms and virtual-wrapper atoms (_orig_atom) pass through untouched.
# NOTE(review): several lines (yields, early returns, try header) are
# elided in this excerpt per the skipping embedded line numbers.
2290 def _expand_virt_from_graph(self, root, atom):
2291 if not isinstance(atom, Atom):
2293 graphdb = self._dynamic_config.mydbapi[root]
2294 match = graphdb.match_pkgs(atom)
2299 if not pkg.cpv.startswith("virtual/"):
# Resolve the virtual's RDEPEND against the graph trees; a broken
# RDEPEND in the installed package DB is reported, not raised.
2303 rdepend = self._select_atoms_from_graph(
2304 pkg.root, pkg.metadata.get("RDEPEND", ""),
2305 myuse=self._pkg_use_enabled(pkg),
2306 parent=pkg, strict=False)
2307 except InvalidDependString as e:
2308 writemsg_level("!!! Invalid RDEPEND in " + \
2309 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2310 (pkg.root, pkg.cpv, e),
2311 noiselevel=-1, level=logging.ERROR)
2315 for atoms in rdepend.values():
2317 if hasattr(atom, "_orig_atom"):
2318 # Ignore virtual atoms since we're only
2319 # interested in expanding the real atoms.
# Walk the digraph upward from start_node, building the chain of
# (display_name, node_type) pairs that explains why a package was
# pulled in. USE flags that affect the relevant dep atoms are rendered
# inline as pkg[flag,-flag].
# NOTE(review): this excerpt elides many lines (loop headers, breaks,
# continues, some try headers) -- embedded original line numbers skip
# values, so the traversal logic below is incomplete as shown.
2323 def _get_dep_chain(self, start_node, target_atom=None,
2324 unsatisfied_dependency=False):
2326 Returns a list of (atom, node_type) pairs that represent a dep chain.
2327 If target_atom is None, the first package shown is pkg's parent.
2328 If target_atom is not None the first package shown is pkg.
2329 If unsatisfied_dependency is True, the first parent is select who's
2330 dependency is not satisfied by 'pkg'. This is need for USE changes.
2331 (Does not support target_atom.)
2333 traversed_nodes = set()
2337 all_parents = self._dynamic_config._parent_atoms
# With a target_atom, the chain starts at the node itself, annotated
# with the USE flags that affect the atom in its dep strings.
2339 if target_atom is not None and isinstance(node, Package):
2340 affecting_use = set()
2341 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2343 affecting_use.update(extract_affecting_use(
2344 node.metadata[dep_str], target_atom))
2345 except InvalidDependString:
2346 if not node.installed:
# Masked/forced flags are not user-adjustable, so hide them.
2348 affecting_use.difference_update(node.use.mask, node.use.force)
2349 pkg_name = _unicode_decode("%s") % (node.cpv,)
2352 for flag in affecting_use:
2353 if flag in self._pkg_use_enabled(node):
2356 usedep.append("-"+flag)
2357 pkg_name += "[%s]" % ",".join(usedep)
2359 dep_chain.append((pkg_name, node.type_name))
# Main upward traversal: append each node to the chain, then pick the
# next parent (preference order established further below).
2361 while node is not None:
2362 traversed_nodes.add(node)
2364 if isinstance(node, DependencyArg):
2365 if self._dynamic_config.digraph.parent_nodes(node):
2368 node_type = "argument"
2369 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
2371 elif node is not start_node:
2372 for ppkg, patom in all_parents[child]:
2374 atom = patom.unevaluated_atom
# Collect only the dep strings whose priority classes actually
# connect node to child, to compute affecting USE precisely.
2378 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
2379 if priority.buildtime:
2380 dep_strings.add(node.metadata["DEPEND"])
2381 if priority.runtime:
2382 dep_strings.add(node.metadata["RDEPEND"])
2383 if priority.runtime_post:
2384 dep_strings.add(node.metadata["PDEPEND"])
2386 affecting_use = set()
2387 for dep_str in dep_strings:
2388 affecting_use.update(extract_affecting_use(dep_str, atom))
2390 #Don't show flags as 'affecting' if the user can't change them,
2391 affecting_use.difference_update(node.use.mask, \
2394 pkg_name = _unicode_decode("%s") % (node.cpv,)
2397 for flag in affecting_use:
2398 if flag in self._pkg_use_enabled(node):
2401 usedep.append("-"+flag)
2402 pkg_name += "[%s]" % ",".join(usedep)
2404 dep_chain.append((pkg_name, node.type_name))
2406 if node not in self._dynamic_config.digraph:
2407 # The parent is not in the graph due to backtracking.
2410 # When traversing to parents, prefer arguments over packages
2411 # since arguments are root nodes. Never traverse the same
2412 # package twice, in order to prevent an infinite loop.
2414 selected_parent = None
2417 parent_unsatisfied = None
2419 for parent in self._dynamic_config.digraph.parent_nodes(node):
2420 if parent in traversed_nodes:
2422 if isinstance(parent, DependencyArg):
2425 if isinstance(parent, Package) and \
2426 parent.operation == "merge":
2427 parent_merge = parent
2428 if unsatisfied_dependency and node is start_node:
2429 # Make sure that pkg doesn't satisfy parent's dependency.
2430 # This ensures that we select the correct parent for use
2432 for ppkg, atom in all_parents[start_node]:
2434 atom_set = InternalPackageSet(initial_atoms=(atom,))
2435 if not atom_set.findAtomForPackage(start_node):
2436 parent_unsatisfied = parent
2439 selected_parent = parent
# Parent preference: unsatisfied parent > parent being merged
# (bug #354747) > argument node > any parent.
2441 if parent_unsatisfied is not None:
2442 selected_parent = parent_unsatisfied
2443 elif parent_merge is not None:
2444 # Prefer parent in the merge list (bug #354747).
2445 selected_parent = parent_merge
2446 elif parent_arg is not None:
2447 if self._dynamic_config.digraph.parent_nodes(parent_arg):
2448 selected_parent = parent_arg
2451 (_unicode_decode("%s") % (parent_arg,), "argument"))
2452 selected_parent = None
2454 node = selected_parent
# Render the dep chain of pkg as a single "#"-prefixed comment line
# suitable for writing into config files (e.g. package.use entries).
# NOTE(review): a few lines (display_list init, else branch, return) are
# elided in this excerpt per the skipping embedded line numbers.
2457 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
2458 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2460 for node, node_type in dep_chain:
2461 if node_type == "argument":
2462 display_list.append("required by %s (argument)" % node)
2464 display_list.append("required by %s" % node)
2466 msg = "#" + ", ".join(display_list) + "\n"
# Explain to the user why an atom could not be satisfied: collects
# masked matches, USE/IUSE mismatches, unmet REQUIRED_USE, and prints
# the most helpful of those; falls back to misspelling suggestions and
# a dep chain showing which parent required the atom.
# NOTE(review): this excerpt is heavily elided (try headers, else
# branches, several assignments are missing -- the embedded original
# line numbers skip values); read the control flow accordingly.
2470 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2471 check_backtrack=False):
2473 When check_backtrack=True, no output is produced and
2474 the method either returns or raises _backtrack_mask if
2475 a matching package has been masked by backtracking.
2477 backtrack_mask = False
2478 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,))
2479 xinfo = '"%s"' % atom.unevaluated_atom
2482 if isinstance(myparent, AtomArg):
2483 xinfo = _unicode_decode('"%s"') % (myparent,)
2484 # Discard null/ from failed cpv_expand category expansion.
2485 xinfo = xinfo.replace("null/", "")
2487 xinfo = "%s for %s" % (xinfo, root)
2488 masked_packages = []
2490 missing_use_adjustable = set()
2491 required_use_unsatisfied = []
2492 masked_pkg_instances = set()
2493 missing_licenses = []
2494 have_eapi_mask = False
2495 pkgsettings = self._frozen_config.pkgsettings[root]
2496 root_config = self._frozen_config.roots[root]
2497 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2498 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2499 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2500 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Phase 1: scan every db for matches (ignoring USE deps) and classify
# each match by its mask reasons.
2501 for db, pkg_type, built, installed, db_keys in dbs:
2505 if hasattr(db, "xmatch"):
2506 cpv_list = db.xmatch("match-all", atom.without_use)
2508 cpv_list = db.match(atom.without_use)
2511 for cpv in cpv_list:
2512 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, \
2513 pkg_type, built, installed, db_keys, _pkg_use_enabled=self._pkg_use_enabled)
2515 if metadata is not None:
2516 pkg = self._pkg(cpv, pkg_type, root_config,
2517 installed=installed)
2518 # pkg.metadata contains calculated USE for ebuilds,
2519 # required later for getMissingLicenses.
2520 metadata = pkg.metadata
2521 if pkg.cp != atom.cp:
2522 # A cpv can be returned from dbapi.match() as an
2523 # old-style virtual match even in cases when the
2524 # package does not actually PROVIDE the virtual.
2525 # Filter out any such false matches here.
2526 if not atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
2528 if pkg in self._dynamic_config._runtime_pkg_mask:
2529 backtrack_reasons = \
2530 self._dynamic_config._runtime_pkg_mask[pkg]
2531 mreasons.append('backtracking: %s' % \
2532 ', '.join(sorted(backtrack_reasons)))
2533 backtrack_mask = True
2534 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2535 modified_use=self._pkg_use_enabled(pkg)):
2536 mreasons = ["exclude option"]
2538 masked_pkg_instances.add(pkg)
2539 if atom.unevaluated_atom.use:
2541 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2542 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2543 missing_use.append(pkg)
2547 writemsg("violated_conditionals raised " + \
2548 "InvalidAtom: '%s' parent: %s" % \
2549 (atom, myparent), noiselevel=-1)
2551 if not mreasons and \
2553 pkg.metadata["REQUIRED_USE"] and \
2554 eapi_has_required_use(pkg.metadata["EAPI"]):
2555 if not check_required_use(
2556 pkg.metadata["REQUIRED_USE"],
2557 self._pkg_use_enabled(pkg),
2558 pkg.iuse.is_valid_flag):
2559 required_use_unsatisfied.append(pkg)
2561 if pkg.built and not mreasons:
2562 mreasons = ["use flag configuration mismatch"]
2563 masked_packages.append(
2564 (root_config, pkgsettings, cpv, metadata, mreasons))
# check_backtrack mode: signal (via exception) instead of printing.
2568 raise self._backtrack_mask()
# Phase 2: turn USE/IUSE mismatches into human-readable change
# suggestions ("Change USE: ..."), respecting masked/forced flags
# and warning when a change would violate REQUIRED_USE.
2572 missing_use_reasons = []
2573 missing_iuse_reasons = []
2574 for pkg in missing_use:
2575 use = self._pkg_use_enabled(pkg)
2577 #Use the unevaluated atom here, because some flags might have gone
2578 #lost during evaluation.
2579 required_flags = atom.unevaluated_atom.use.required
2580 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2584 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2585 missing_iuse_reasons.append((pkg, mreasons))
2587 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2588 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2590 untouchable_flags = \
2591 frozenset(chain(pkg.use.mask, pkg.use.force))
2592 if untouchable_flags.intersection(
2593 chain(need_enable, need_disable)):
2596 missing_use_adjustable.add(pkg)
2597 required_use = pkg.metadata["REQUIRED_USE"]
2598 required_use_warning = ""
2600 old_use = self._pkg_use_enabled(pkg)
2601 new_use = set(self._pkg_use_enabled(pkg))
2602 for flag in need_enable:
2604 for flag in need_disable:
2605 new_use.discard(flag)
2606 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2607 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2608 required_use_warning = ", this change violates use flag constraints " + \
2609 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2611 if need_enable or need_disable:
2613 changes.extend(colorize("red", "+" + x) \
2614 for x in need_enable)
2615 changes.extend(colorize("blue", "-" + x) \
2616 for x in need_disable)
2617 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2618 missing_use_reasons.append((pkg, mreasons))
2620 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2621 # Lets see if the violated use deps are conditional.
2622 # If so, suggest to change them on the parent.
2624 # If the child package is masked then a change to
2625 # parent USE is not a valid solution (a normal mask
2626 # message should be displayed instead).
2627 if pkg in masked_pkg_instances:
2631 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2632 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2633 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2634 #all violated use deps are conditional
2636 conditional = violated_atom.use.conditional
2637 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2638 conditional.enabled, conditional.disabled))
2640 untouchable_flags = \
2641 frozenset(chain(myparent.use.mask, myparent.use.force))
2642 if untouchable_flags.intersection(involved_flags):
2645 required_use = myparent.metadata["REQUIRED_USE"]
2646 required_use_warning = ""
2648 old_use = self._pkg_use_enabled(myparent)
2649 new_use = set(self._pkg_use_enabled(myparent))
2650 for flag in involved_flags:
2652 new_use.discard(flag)
2655 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2656 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2657 required_use_warning = ", this change violates use flag constraints " + \
2658 "defined by %s: '%s'" % (myparent.cpv, \
2659 human_readable_required_use(required_use))
2661 for flag in involved_flags:
2662 if flag in self._pkg_use_enabled(myparent):
2663 changes.append(colorize("blue", "-" + flag))
2665 changes.append(colorize("red", "+" + flag))
2666 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2667 if (myparent, mreasons) not in missing_use_reasons:
2668 missing_use_reasons.append((myparent, mreasons))
# Phase 3: choose which explanation to display (unmasked USE changes
# beat IUSE gaps; only the latest version is shown).
2670 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2671 in missing_use_reasons if pkg not in masked_pkg_instances]
2673 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2674 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2676 show_missing_use = False
2677 if unmasked_use_reasons:
2678 # Only show the latest version.
2679 show_missing_use = []
2681 parent_reason = None
2682 for pkg, mreasons in unmasked_use_reasons:
2684 if parent_reason is None:
2685 #This happens if a use change on the parent
2686 #leads to a satisfied conditional use dep.
2687 parent_reason = (pkg, mreasons)
2688 elif pkg_reason is None:
2689 #Don't rely on the first pkg in unmasked_use_reasons,
2690 #being the highest version of the dependency.
2691 pkg_reason = (pkg, mreasons)
2693 show_missing_use.append(pkg_reason)
2695 show_missing_use.append(parent_reason)
2697 elif unmasked_iuse_reasons:
2698 masked_with_iuse = False
2699 for pkg in masked_pkg_instances:
2700 #Use atom.unevaluated here, because some flags might have gone
2701 #lost during evaluation.
2702 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2703 # Package(s) with required IUSE are masked,
2704 # so display a normal masking message.
2705 masked_with_iuse = True
2707 if not masked_with_iuse:
2708 show_missing_use = unmasked_iuse_reasons
2710 if required_use_unsatisfied:
2711 # If there's a higher unmasked version in missing_use_adjustable
2712 # then we want to show that instead.
2713 for pkg in missing_use_adjustable:
2714 if pkg not in masked_pkg_instances and \
2715 pkg > required_use_unsatisfied[0]:
2716 required_use_unsatisfied = False
# Phase 4: print the chosen explanation.
2721 if required_use_unsatisfied:
2722 # We have an unmasked package that only requires USE adjustment
2723 # in order to satisfy REQUIRED_USE, and nothing more. We assume
2724 # that the user wants the latest version, so only the first
2725 # instance is displayed.
2726 pkg = required_use_unsatisfied[0]
2727 output_cpv = pkg.cpv
2728 writemsg_stdout("\n!!! " + \
2729 colorize("BAD", "The ebuild selected to satisfy ") + \
2730 colorize("INFORM", xinfo) + \
2731 colorize("BAD", " has unmet requirements.") + "\n",
2733 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
2734 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
2736 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
2737 "are unsatisfied:\n", noiselevel=-1)
2738 reduced_noise = check_required_use(
2739 pkg.metadata["REQUIRED_USE"],
2740 self._pkg_use_enabled(pkg),
2741 pkg.iuse.is_valid_flag).tounicode()
2742 writemsg_stdout(" %s\n" % \
2743 human_readable_required_use(reduced_noise),
2745 normalized_required_use = \
2746 " ".join(pkg.metadata["REQUIRED_USE"].split())
2747 if reduced_noise != normalized_required_use:
2748 writemsg_stdout("\n The above constraints " + \
2749 "are a subset of the following complete expression:\n",
2751 writemsg_stdout(" %s\n" % \
2752 human_readable_required_use(normalized_required_use),
2754 writemsg_stdout("\n", noiselevel=-1)
2756 elif show_missing_use:
2757 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2758 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
2759 for pkg, mreasons in show_missing_use:
2760 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
2762 elif masked_packages:
2763 writemsg_stdout("\n!!! " + \
2764 colorize("BAD", "All ebuilds that could satisfy ") + \
2765 colorize("INFORM", xinfo) + \
2766 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
2767 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
2768 have_eapi_mask = show_masked_packages(masked_packages)
2770 writemsg_stdout("\n", noiselevel=-1)
2771 msg = ("The current version of portage supports " + \
2772 "EAPI '%s'. You must upgrade to a newer version" + \
2773 " of portage before EAPI masked packages can" + \
2774 " be installed.") % portage.const.EAPI
2775 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
2776 writemsg_stdout("\n", noiselevel=-1)
2780 if not atom.cp.startswith("null/"):
2781 for pkg in self._iter_match_pkgs_any(
2782 root_config, Atom(atom.cp)):
# Nothing matched at all: offer misspelling suggestions by fuzzy
# matching the requested cp against all known packages.
2786 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2787 if isinstance(myparent, AtomArg) and \
2789 self._frozen_config.myopts.get(
2790 "--misspell-suggestions", "y") != "n":
2791 cp = myparent.atom.cp.lower()
2792 cat, pkg = portage.catsplit(cp)
2796 writemsg_stdout("\nemerge: searching for similar names..."
2800 all_cp.update(vardb.cp_all())
2801 all_cp.update(portdb.cp_all())
2802 if "--usepkg" in self._frozen_config.myopts:
2803 all_cp.update(bindb.cp_all())
2806 for cp_orig in all_cp:
2807 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
2808 all_cp = set(orig_cp_map)
2811 matches = difflib.get_close_matches(cp, all_cp)
2814 for other_cp in all_cp:
2815 other_pkg = portage.catsplit(other_cp)[1]
2816 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
2817 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
2819 for pkg_match in pkg_matches:
2820 matches.extend(pkg_to_cp[pkg_match])
2822 matches_orig_case = []
2824 matches_orig_case.extend(orig_cp_map[cp])
2825 matches = matches_orig_case
2827 if len(matches) == 1:
2828 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
2830 elif len(matches) > 1:
2832 "\nemerge: Maybe you meant any of these: %s?\n" % \
2833 (", ".join(matches),), noiselevel=-1)
2835 # Generally, this would only happen if
2836 # all dbapis are empty.
2837 writemsg_stdout(" nothing similar found.\n"
# Finally, show the dependency chain that required this atom.
2840 if not isinstance(myparent, AtomArg):
2841 # It's redundant to show parent for AtomArg since
2842 # it's the same as 'xinfo' displayed above.
2843 dep_chain = self._get_dep_chain(myparent, atom)
2844 for node, node_type in dep_chain:
2845 msg.append('(dependency required by "%s" [%s])' % \
2846 (colorize('INFORM', _unicode_decode("%s") % \
2847 (node)), node_type))
2850 writemsg_stdout("\n".join(msg), noiselevel=-1)
2851 writemsg_stdout("\n", noiselevel=-1)
2855 writemsg_stdout("\n", noiselevel=-1)
# Iterate matching Package instances across every db (ebuild, binary,
# installed) configured for this root, in filtered-trees order.
# NOTE(review): the yield statement is elided from this excerpt (the
# embedded original line numbers skip a value after 2861).
2857 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
2858 for db, pkg_type, built, installed, db_keys in \
2859 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2860 for pkg in self._iter_match_pkgs(root_config,
2861 pkg_type, atom, onlydeps=onlydeps):
# Generator over Package instances of the given pkg_type matching atom.
# Visibility is NOT checked here, and USE deps are not matched for
# unbuilt ebuilds (USE is computed lazily after visibility checks).
# NOTE(review): several lines (yields, continues, docstring quotes) are
# elided in this excerpt per the skipping embedded line numbers.
2864 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2866 Iterate over Package instances of pkg_type matching the given atom.
2867 This does not check visibility and it also does not match USE for
2868 unbuilt ebuilds since USE are lazily calculated after visibility
2869 checks (to avoid the expense when possible).
2872 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
2874 if hasattr(db, "xmatch"):
2875 cpv_list = db.xmatch("match-all", atom)
2877 cpv_list = db.match(atom)
2879 # USE=multislot can make an installed package appear as if
2880 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2881 # won't do any good as long as USE=multislot is enabled since
2882 # the newly built package still won't have the expected slot.
2883 # Therefore, assume that such SLOT dependencies are already
2884 # satisfied rather than forcing a rebuild.
2885 installed = pkg_type == 'installed'
2886 if installed and not cpv_list and atom.slot:
# Fallback for slotted atoms with no installed match: accept an
# installed package whose slot exists in no other db.
2887 for cpv in db.match(atom.cp):
2888 slot_available = False
2889 for other_db, other_type, other_built, \
2890 other_installed, other_keys in \
2891 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2894 other_db.aux_get(cpv, ["SLOT"])[0]:
2895 slot_available = True
2899 if not slot_available:
2901 inst_pkg = self._pkg(cpv, "installed",
2902 root_config, installed=installed)
2903 # Remove the slot from the atom and verify that
2904 # the package matches the resulting atom.
2905 if portage.match_from_list(
2906 atom.without_slot, [inst_pkg]):
2914 for cpv in cpv_list:
2916 pkg = self._pkg(cpv, pkg_type, root_config,
2917 installed=installed, onlydeps=onlydeps)
2918 except portage.exception.PackageNotFound:
2921 if pkg.cp != atom.cp:
2922 # A cpv can be returned from dbapi.match() as an
2923 # old-style virtual match even in cases when the
2924 # package does not actually PROVIDE the virtual.
2925 # Filter out any such false matches here.
2926 if not InternalPackageSet(initial_atoms=(atom,)
2927 ).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
# Cached front-end for package selection: memoizes results of
# _select_pkg_highest_available_imp keyed on (root, atom, onlydeps),
# refreshing the cached 'existing' node once the pkg enters the graph.
# NOTE(review): some lines (cache-hit unpacking, returns) are elided in
# this excerpt per the skipping embedded line numbers.
2931 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2932 cache_key = (root, atom, onlydeps)
2933 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
2936 if pkg and not existing:
2937 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2938 if existing and existing == pkg:
2939 # Update the cache to reflect that the
2940 # package has been added to the graph.
2942 self._dynamic_config._highest_pkg_cache[cache_key] = ret
2944 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2945 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# NOTE(review): orphan fragment -- the enclosing 'def' header (original
# line ~2947) is elided from this excerpt, so the method these lines
# belong to cannot be identified here. The visible code injects pkg into
# the per-root visible-package set when it passes the visibility check
# and is not an installed-but-masked package.
2948 settings = pkg.root_config.settings
2949 if self._pkg_visibility_check(pkg) and \
2950 not (pkg.installed and pkg.masks):
2951 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
2954 def _want_installed_pkg(self, pkg):
# (Docstring body below; its opening/closing quote lines are elided
# from this view.)
2956 Given an installed package returned from select_pkg, return
2957 True if the user has not explicitly requested for this package
2958 to be replaced (typically via an atom on the command line).
# Outside "selective" mode, a package on the target root that matches
# an argument atom was explicitly requested, so we do NOT want to keep
# the installed instance.
2960 if "selective" not in self._dynamic_config.myparams and \
2961 pkg.root == self._frozen_config.target_root:
# next() probes whether any argument atom matches this package;
# StopIteration means no argument requested it.
2963 next(self._iter_atoms_for_pkg(pkg))
2964 except StopIteration:
# Treat corrupt dependency metadata as "no match" rather than failing.
2966 except portage.exception.InvalidDependString:
# Selection implementation with autounmask support: first try a normal
# selection; if that fails (or yields an unwanted installed pkg) and
# autounmask is enabled, retry while progressively allowing USE
# changes, then unstable keywords and license changes.
2972 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2973 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the plain (non-autounmask) result so we can fall back to it.
2975 default_selection = (pkg, existing)
2977 if self._dynamic_config._autounmask is True:
2978 if pkg is not None and \
2980 not self._want_installed_pkg(pkg):
# First pass allows only USE changes; second also allows keyword
# and license changes.
2983 for only_use_changes in True, False:
2988 self._wrapped_select_pkg_highest_available_imp(
2989 root, atom, onlydeps=onlydeps,
2990 allow_use_changes=True,
2991 allow_unstable_keywords=(not only_use_changes),
2992 allow_license_changes=(not only_use_changes))
2994 if pkg is not None and \
2996 not self._want_installed_pkg(pkg):
# A needed config change was recorded; a restart of the
# resolver has been requested, so stop trying here.
2999 if self._dynamic_config._need_restart:
3003 # This ensures that we can fall back to an installed package
3004 # that may have been rejected in the autounmask path above.
3005 return default_selection
3007 return pkg, existing
# Visibility check with optional autounmask: classifies the package's
# mask reasons and, when permitted by the allow_* flags, records the
# keyword/license changes needed to unmask it.
# NOTE(review): the early-return lines of this method are elided from
# this view -- the True/False returns must be confirmed upstream.
3009 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False):
3014 if pkg in self._dynamic_config.digraph:
3015 # Sometimes we need to temporarily disable
3016 # dynamic_config._autounmask, but for overall
3017 # consistency in dependency resolution, in any
3018 # case we want to respect autounmask visibity
3019 # for packages that have already been added to
3020 # the dependency graph.
3023 if not self._dynamic_config._autounmask:
3026 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3027 root_config = self._frozen_config.roots[pkg.root]
3028 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each mask reason via its unmask hint.
3030 masked_by_unstable_keywords = False
3031 missing_licenses = None
3032 masked_by_something_else = False
3034 for reason in mreasons:
3035 hint = reason.unmask_hint
3038 masked_by_something_else = True
3039 elif hint.key == "unstable keyword":
3040 masked_by_unstable_keywords = True
3041 elif hint.key == "license":
3042 missing_licenses = hint.value
# Any unknown hint kind cannot be auto-unmasked.
3044 masked_by_something_else = True
3046 if masked_by_something_else:
3049 if pkg in self._dynamic_config._needed_unstable_keywords:
3050 #If the package is already keyworded, remove the mask.
3051 masked_by_unstable_keywords = False
3053 if missing_licenses:
3054 #If the needed licenses are already unmasked, remove the mask.
3055 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
3057 if not (masked_by_unstable_keywords or missing_licenses):
3058 #Package has already been unmasked.
3061 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
3062 (missing_licenses and not allow_license_changes):
3063 #We are not allowed to do the needed changes.
# Record the changes we are allowed to make, so they can be
# reported/applied later by the autounmask machinery.
3066 if masked_by_unstable_keywords:
3067 self._dynamic_config._needed_unstable_keywords.add(pkg)
3069 if missing_licenses:
3070 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
3074 def _pkg_use_enabled(self, pkg, target_use=None):
# (Docstring body below; quote lines are elided from this view.)
3076 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
3077 If target_use is given, the need changes are computed to make the package useable.
3078 Example: target_use = { "foo": True, "bar": False }
3079 The flags target_use must be in the pkg's IUSE.
3081 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query mode: no target requested, just report effective USE.
3083 if target_use is None:
3084 if needed_use_config_change is None:
3085 return pkg.use.enabled
3087 return needed_use_config_change[0]
# Start from previously recorded changes, if any, so repeated calls
# accumulate rather than overwrite.
3089 if needed_use_config_change is not None:
3090 old_use = needed_use_config_change[0]
3092 old_changes = needed_use_config_change[1]
3093 new_changes = old_changes.copy()
3095 old_use = pkg.use.enabled
# Compute the flag flips needed to satisfy target_use.
3100 for flag, state in target_use.items():
3102 if flag not in old_use:
# A previously recorded "disable" conflicts with the new
# "enable" request (and vice versa below).
3103 if new_changes.get(flag) == False:
3105 new_changes[flag] = True
3109 if new_changes.get(flag) == True:
3111 new_changes[flag] = False
# Flags not mentioned in target_use keep their old state.
3112 new_use.update(old_use.difference(target_use))
# Nested helper: a USE change warrants a resolver restart when it
# alters this package's dep strings or an in-graph parent's USE deps.
3114 def want_restart_for_use_change(pkg, new_use):
3115 if pkg not in self._dynamic_config.digraph.nodes:
3118 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
3119 dep = pkg.metadata[key]
3120 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3121 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3123 if old_val != new_val:
3126 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3127 if not parent_atoms:
3130 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
3131 for ppkg, atom in parent_atoms:
# Only parents whose atoms constrain a changed flag matter.
3132 if not atom.use or \
3133 not atom.use.required.intersection(changes):
3140 if new_changes != old_changes:
3141 #Don't do the change if it violates REQUIRED_USE.
3142 required_use = pkg.metadata["REQUIRED_USE"]
3143 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3144 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# Masked/forced flags cannot be changed by the user.
3147 if pkg.use.mask.intersection(new_changes) or \
3148 pkg.use.force.intersection(new_changes):
3151 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
3152 if want_restart_for_use_change(pkg, new_use):
3153 self._dynamic_config._need_restart = True
# Core package selector: iterates all package dbs (installed, binary,
# ebuild) for `root`, applies visibility / USE / --newuse / rebuilt-
# binaries policy, and returns (best_pkg, existing_graph_node).
# Ordered by type preference; "ebuild" type is the last resort.
# NOTE(review): many original lines are elided from this view
# (continue/break/return statements, some conditionals) -- confirm
# control flow against the full source before modifying.
3156 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
3157 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False):
3158 root_config = self._frozen_config.roots[root]
3159 pkgsettings = self._frozen_config.pkgsettings[root]
3160 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3161 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3162 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3163 # List of acceptable packages, ordered by type preference.
3164 matched_packages = []
3165 matched_pkgs_ignore_use = []
3166 highest_version = None
3167 if not isinstance(atom, portage.dep.Atom):
3168 atom = portage.dep.Atom(atom)
3170 atom_set = InternalPackageSet(initial_atoms=(atom,))
3171 existing_node = None
# Cache relevant option/param flags once, outside the loops.
3173 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3174 usepkg = "--usepkg" in self._frozen_config.myopts
3175 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3176 empty = "empty" in self._dynamic_config.myparams
3177 selective = "selective" in self._dynamic_config.myparams
3179 noreplace = "--noreplace" in self._frozen_config.myopts
3180 avoid_update = "--update" not in self._frozen_config.myopts
3181 dont_miss_updates = "--update" in self._frozen_config.myopts
3182 use_ebuild_visibility = self._frozen_config.myopts.get(
3183 '--use-ebuild-visibility', 'n') != 'n'
3184 # Behavior of the "selective" parameter depends on
3185 # whether or not a package matches an argument atom.
3186 # If an installed package provides an old-style
3187 # virtual that is no longer provided by an available
3188 # package, the installed package may match an argument
3189 # atom even though none of the available packages do.
3190 # Therefore, "selective" logic does not consider
3191 # whether or not an installed package matches an
3192 # argument atom. It only considers whether or not
3193 # available packages match argument atoms, which is
3194 # represented by the found_available_arg flag.
3195 found_available_arg = False
3196 packages_with_invalid_use_config = []
# Two passes: first prefer nodes already in the graph, then any match.
3197 for find_existing_node in True, False:
3200 for db, pkg_type, built, installed, db_keys in dbs:
3203 if installed and not find_existing_node:
3204 want_reinstall = reinstall or empty or \
3205 (found_available_arg and not selective)
3206 if want_reinstall and matched_packages:
3209 # Ignore USE deps for the initial match since we want to
3210 # ensure that updates aren't missed solely due to the user's
3211 # USE configuration.
3212 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3214 if pkg in self._dynamic_config._runtime_pkg_mask:
3215 # The package has been masked by the backtracking logic
3218 if not pkg.installed and \
3219 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3220 modified_use=self._pkg_use_enabled(pkg)):
3223 if packages_with_invalid_use_config and \
3224 (not pkg.installed or dont_miss_updates):
3225 # Check if a higher version was rejected due to user
3226 # USE configuration. The packages_with_invalid_use_config
3227 # list only contains unbuilt ebuilds since USE can't
3228 # be changed for built packages.
3229 higher_version_rejected = False
3230 for rejected in packages_with_invalid_use_config:
3231 if rejected.cp != pkg.cp:
3234 higher_version_rejected = True
3236 if higher_version_rejected:
3240 # Make --noreplace take precedence over --newuse.
3241 if not pkg.installed and noreplace and \
3242 cpv in vardb.match(atom):
3243 inst_pkg = self._pkg(pkg.cpv, "installed",
3244 root_config, installed=True)
3245 if inst_pkg.visible:
3246 # If the installed version is masked, it may
3247 # be necessary to look at lower versions,
3248 # in case there is a visible downgrade.
3250 reinstall_for_flags = None
# Visibility filtering (see comment below for the rationale).
3252 if not pkg.installed or \
3253 (matched_packages and not avoid_update):
3254 # Only enforce visibility on installed packages
3255 # if there is at least one other visible package
3256 # available. By filtering installed masked packages
3257 # here, packages that have been masked since they
3258 # were installed can be automatically downgraded
3259 # to an unmasked version. NOTE: This code needs to
3260 # be consistent with masking behavior inside
3261 # _dep_check_composite_db, in order to prevent
3262 # incorrect choices in || deps like bug #351828.
3264 if not self._pkg_visibility_check(pkg, \
3265 allow_unstable_keywords=allow_unstable_keywords,
3266 allow_license_changes=allow_license_changes):
3269 # Enable upgrade or downgrade to a version
3270 # with visible KEYWORDS when the installed
3271 # version is masked by KEYWORDS, but never
3272 # reinstall the same exact version only due
3273 # to a KEYWORDS mask. See bug #252167.
3275 if pkg.type_name != "ebuild" and matched_packages:
3276 # Don't re-install a binary package that is
3277 # identical to the currently installed package
3278 # (see bug #354441).
3279 identical_binary = False
3280 if usepkg and pkg.installed:
3281 for selected_pkg in matched_packages:
3282 if selected_pkg.type_name == "binary" and \
3283 selected_pkg.cpv == pkg.cpv and \
3284 selected_pkg.metadata.get('BUILD_TIME') == \
3285 pkg.metadata.get('BUILD_TIME'):
3286 identical_binary = True
3289 if not identical_binary:
3290 # If the ebuild no longer exists or it's
3291 # keywords have been dropped, reject built
3292 # instances (installed or binary).
3293 # If --usepkgonly is enabled, assume that
3294 # the ebuild status should be ignored.
3295 if not use_ebuild_visibility and usepkgonly:
3296 if pkg.installed and pkg.masks:
# Look up the corresponding ebuild to validate keywords.
3301 pkg.cpv, "ebuild", root_config)
3302 except portage.exception.PackageNotFound:
3305 if not self._pkg_visibility_check(pkg_eb, \
3306 allow_unstable_keywords=allow_unstable_keywords,
3307 allow_license_changes=allow_license_changes):
3310 # Calculation of USE for unbuilt ebuilds is relatively
3311 # expensive, so it is only performed lazily, after the
3312 # above visibility checks are complete.
# Check whether an argument atom matches this package.
3315 if root == self._frozen_config.target_root:
3317 myarg = next(self._iter_atoms_for_pkg(pkg))
3318 except StopIteration:
3320 except portage.exception.InvalidDependString:
3322 # masked by corruption
3324 if not installed and myarg:
3325 found_available_arg = True
3327 if atom.unevaluated_atom.use:
3328 #Make sure we don't miss a 'missing IUSE'.
3329 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3330 # Don't add this to packages_with_invalid_use_config
3331 # since IUSE cannot be adjusted by the user.
# USE-dep handling: optionally compute needed USE changes
# (autounmask), otherwise check satisfiability as-is.
3336 matched_pkgs_ignore_use.append(pkg)
3337 if allow_use_changes:
3339 for flag in atom.use.enabled:
3340 target_use[flag] = True
3341 for flag in atom.use.disabled:
3342 target_use[flag] = False
3343 use = self._pkg_use_enabled(pkg, target_use)
3345 use = self._pkg_use_enabled(pkg)
3348 can_adjust_use = not pkg.built
3349 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3350 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3352 if atom.use.enabled:
3353 if atom.use.enabled.intersection(missing_disabled):
3355 can_adjust_use = False
3356 need_enabled = atom.use.enabled.difference(use)
3358 need_enabled = need_enabled.difference(missing_enabled)
3362 if pkg.use.mask.intersection(need_enabled):
3363 can_adjust_use = False
3365 if atom.use.disabled:
3366 if atom.use.disabled.intersection(missing_enabled):
3368 can_adjust_use = False
3369 need_disabled = atom.use.disabled.intersection(use)
3371 need_disabled = need_disabled.difference(missing_disabled)
3375 if pkg.use.force.difference(
3376 pkg.use.mask).intersection(need_disabled):
3377 can_adjust_use = False
3381 # Above we must ensure that this package has
3382 # absolutely no use.force, use.mask, or IUSE
3383 # issues that the user typically can't make
3384 # adjustments to solve (see bug #345979).
3385 # FIXME: Conditional USE deps complicate
3386 # issues. This code currently excludes cases
3387 # in which the user can adjust the parent
3388 # package's USE in order to satisfy the dep.
3389 packages_with_invalid_use_config.append(pkg)
# Track the highest visible version for this cp.
3392 if pkg.cp == atom_cp:
3393 if highest_version is None:
3394 highest_version = pkg
3395 elif pkg > highest_version:
3396 highest_version = pkg
3397 # At this point, we've found the highest visible
3398 # match from the current repo. Any lower versions
3399 # from this repo are ignored, so this so the loop
3400 # will always end with a break statement below
3402 if find_existing_node:
3403 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3406 # Use PackageSet.findAtomForPackage()
3407 # for PROVIDE support.
3408 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3409 if highest_version and \
3410 e_pkg.cp == atom_cp and \
3411 e_pkg < highest_version and \
3412 e_pkg.slot_atom != highest_version.slot_atom:
3413 # There is a higher version available in a
3414 # different slot, so this existing node is
# (inferior; original continues past elided lines)
3418 matched_packages.append(e_pkg)
3419 existing_node = e_pkg
3421 # Compare built package to current config and
3422 # reject the built package if necessary.
3423 if built and (not installed or matched_pkgs_ignore_use) and \
3424 ("--newuse" in self._frozen_config.myopts or \
3425 "--reinstall" in self._frozen_config.myopts or \
3426 "--binpkg-respect-use" in self._frozen_config.myopts):
3427 iuses = pkg.iuse.all
3428 old_use = self._pkg_use_enabled(pkg)
3430 pkgsettings.setcpv(myeb)
3432 pkgsettings.setcpv(pkg)
3433 now_use = pkgsettings["PORTAGE_USE"].split()
3434 forced_flags = set()
3435 forced_flags.update(pkgsettings.useforce)
3436 forced_flags.update(pkgsettings.usemask)
3438 if myeb and not usepkgonly:
3439 cur_iuse = myeb.iuse.all
3440 if self._reinstall_for_flags(forced_flags,
3444 # Compare current config to installed package
3445 # and do not reinstall if possible.
3446 if not installed and \
3447 ("--newuse" in self._frozen_config.myopts or \
3448 "--reinstall" in self._frozen_config.myopts) and \
3449 cpv in vardb.match(atom):
3450 forced_flags = set()
3451 forced_flags.update(pkg.use.force)
3452 forced_flags.update(pkg.use.mask)
3453 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3454 old_use = inst_pkg.use.enabled
3455 old_iuse = inst_pkg.iuse.all
3456 cur_use = self._pkg_use_enabled(pkg)
3457 cur_iuse = pkg.iuse.all
3458 reinstall_for_flags = \
3459 self._reinstall_for_flags(
3460 forced_flags, old_use, old_iuse,
3462 if reinstall_for_flags:
# Accept this package as a candidate.
3466 matched_packages.append(pkg)
3467 if reinstall_for_flags:
3468 self._dynamic_config._reinstall_nodes[pkg] = \
3472 if not matched_packages:
3475 if "--debug" in self._frozen_config.myopts:
3476 for pkg in matched_packages:
3477 portage.writemsg("%s %s\n" % \
3478 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3480 # Filter out any old-style virtual matches if they are
3481 # mixed with new-style virtual matches.
3483 if len(matched_packages) > 1 and \
3484 "virtual" == portage.catsplit(cp)[0]:
3485 for pkg in matched_packages:
3488 # Got a new-style virtual, so filter
3489 # out any old-style virtuals.
3490 matched_packages = [pkg for pkg in matched_packages \
# An existing graph node that is still among the candidates wins.
3494 if existing_node is not None and \
3495 existing_node in matched_packages:
3496 return existing_node, existing_node
3498 if len(matched_packages) > 1:
3499 if rebuilt_binaries:
3502 for pkg in matched_packages:
3507 if built_pkg is not None and inst_pkg is not None:
3508 # Only reinstall if binary package BUILD_TIME is
3509 # non-empty, in order to avoid cases like to
3510 # bug #306659 where BUILD_TIME fields are missing
3511 # in local and/or remote Packages file.
3513 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3514 except (KeyError, ValueError):
3518 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3519 except (KeyError, ValueError):
3520 installed_timestamp = 0
3522 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3523 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3524 if built_timestamp and \
3525 built_timestamp > installed_timestamp and \
3526 built_timestamp >= minimal_timestamp:
3527 return built_pkg, existing_node
3529 #Don't care if the binary has an older BUILD_TIME than the installed
3530 #package. This is for closely tracking a binhost.
3531 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3533 if built_timestamp and \
3534 built_timestamp != installed_timestamp:
3535 return built_pkg, existing_node
# Drop invalid installed packages from the candidate list.
3537 for pkg in matched_packages:
3538 if pkg.installed and pkg.invalid:
3539 matched_packages = [x for x in \
3540 matched_packages if x is not pkg]
# Prefer a visible installed package when available.
3543 for pkg in matched_packages:
3544 if pkg.installed and self._pkg_visibility_check(pkg, \
3545 allow_unstable_keywords=allow_unstable_keywords,
3546 allow_license_changes=allow_license_changes):
3547 return pkg, existing_node
# Otherwise keep only the best version among visible candidates
# (falling back to all candidates when everything is masked).
3549 bestmatch = portage.best(
3550 [pkg.cpv for pkg in matched_packages \
3551 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3552 allow_license_changes=allow_license_changes)])
3554 # all are masked, so ignore visibility
3555 bestmatch = portage.best(
3556 [pkg.cpv for pkg in matched_packages])
3557 matched_packages = [pkg for pkg in matched_packages \
3558 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3560 # ordered by type preference ("ebuild" type is the last resort)
3561 return matched_packages[-1], existing_node
3563 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# (Docstring body below; quote lines are elided from this view.)
3565 Select packages that have already been added to the graph or
3566 those that are installed and have not been scheduled for
# The graph tree dbapi only contains in-graph and kept-installed pkgs.
3569 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3570 matches = graph_db.match_pkgs(atom)
3573 pkg = matches[-1] # highest match
3574 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3575 return pkg, in_graph
3577 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
# (Docstring body below; quote lines are elided from this view.)
3579 Select packages that are installed.
3581 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3582 matches = vardb.match_pkgs(atom)
# With multiple matches, prefer an unambiguous unmasked one.
3585 if len(matches) > 1:
3586 unmasked = [pkg for pkg in matches if \
3587 self._pkg_visibility_check(pkg)]
3589 if len(unmasked) == 1:
3592 # Account for packages with masks (like KEYWORDS masks)
3593 # that are usually ignored in visibility checks for
3594 # installed packages, in order to handle cases like
3596 unmasked = [pkg for pkg in matches if not pkg.masks]
3599 pkg = matches[-1] # highest match
3600 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3601 return pkg, in_graph
3603 def _complete_graph(self, required_sets=None):
# (Docstring body below; quote lines are elided from this view.)
3605 Add any deep dependencies of required sets (args, system, world) that
3606 have not been pulled into the graph yet. This ensures that the graph
3607 is consistent such that initially satisfied deep dependencies are not
3608 broken in the new graph. Initially unsatisfied dependencies are
3609 irrelevant since we only want to avoid breaking dependencies that are
3610 initially satisfied.
3612 Since this method can consume enough time to disturb users, it is
3613 currently only enabled by the --complete-graph option.
3615 @param required_sets: contains required sets (currently only used
3616 for depclean and prune removal operations)
3617 @type required_sets: dict
# Nothing to do for --buildpkgonly or non-recursive resolution.
3619 if "--buildpkgonly" in self._frozen_config.myopts or \
3620 "recurse" not in self._dynamic_config.myparams:
3623 if "complete" not in self._dynamic_config.myparams:
3624 # Automatically enable complete mode if there are any
3625 # downgrades, since they often break dependencies
3626 # (like in bug #353613).
3627 have_downgrade = False
3628 for node in self._dynamic_config.digraph:
3629 if not isinstance(node, Package) or \
3630 node.operation != "merge":
3632 vardb = self._frozen_config.roots[
3633 node.root].trees["vartree"].dbapi
3634 inst_pkg = vardb.match_pkgs(node.slot_atom)
3635 if inst_pkg and inst_pkg[0] > node:
3636 have_downgrade = True
3640 self._dynamic_config.myparams["complete"] = True
3642 # Skip complete graph mode, in order to avoid consuming
3643 # enough time to disturb users.
3648 # Put the depgraph into a mode that causes it to only
3649 # select packages that have already been added to the
3650 # graph or those that are installed and have not been
3651 # scheduled for replacement. Also, toggle the "deep"
3652 # parameter so that all dependencies are traversed and
3654 self._select_atoms = self._select_atoms_from_graph
3655 if "remove" in self._dynamic_config.myparams:
3656 self._select_package = self._select_pkg_from_installed
3658 self._select_package = self._select_pkg_from_graph
3659 already_deep = self._dynamic_config.myparams.get("deep") is True
3660 if not already_deep:
3661 self._dynamic_config.myparams["deep"] = True
3663 # Invalidate the package selection cache, since
3664 # _select_package has just changed implementations.
3665 for trees in self._dynamic_config._filtered_trees.values():
3666 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets as arguments so their deep deps are
# traversed with the graph-restricted selector installed above.
3668 args = self._dynamic_config._initial_arg_list[:]
3669 for root in self._frozen_config.roots:
3670 if root != self._frozen_config.target_root and \
3671 "remove" in self._dynamic_config.myparams:
3672 # Only pull in deps for the relevant root.
3674 depgraph_sets = self._dynamic_config.sets[root]
3675 required_set_names = self._frozen_config._required_set_names.copy()
3676 remaining_args = required_set_names.copy()
3677 if required_sets is None or root not in required_sets:
3680 # Removal actions may override sets with temporary
3681 # replacements that have had atoms removed in order
3682 # to implement --deselect behavior.
3683 required_set_names = set(required_sets[root])
3684 depgraph_sets.sets.clear()
3685 depgraph_sets.sets.update(required_sets[root])
3686 if "remove" not in self._dynamic_config.myparams and \
3687 root == self._frozen_config.target_root and \
3689 remaining_args.difference_update(depgraph_sets.sets)
3690 if not remaining_args and \
3691 not self._dynamic_config._ignored_deps and \
3692 not self._dynamic_config._dep_stack:
3694 root_config = self._frozen_config.roots[root]
3695 for s in required_set_names:
3696 pset = depgraph_sets.sets.get(s)
3698 pset = root_config.sets[s]
3699 atom = SETPREFIX + s
3700 args.append(SetArg(arg=atom, pset=pset,
3701 root_config=root_config))
3703 self._set_args(args)
3704 for arg in self._expand_set_args(args, add_to_digraph=True):
3705 for atom in arg.pset.getAtoms():
3706 self._dynamic_config._dep_stack.append(
3707 Dependency(atom=atom, root=arg.root_config.root,
# Replay any previously ignored deps, then rebuild the graph.
3711 if self._dynamic_config._ignored_deps:
3712 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
3713 self._dynamic_config._ignored_deps = []
3714 if not self._create_graph(allow_unsatisfied=True):
3716 # Check the unsatisfied deps to see if any initially satisfied deps
3717 # will become unsatisfied due to an upgrade. Initially unsatisfied
3718 # deps are irrelevant since we only want to avoid breaking deps
3719 # that are initially satisfied.
3720 while self._dynamic_config._unsatisfied_deps:
3721 dep = self._dynamic_config._unsatisfied_deps.pop()
3722 vardb = self._frozen_config.roots[
3723 dep.root].trees["vartree"].dbapi
3724 matches = vardb.match_pkgs(dep.atom)
3726 self._dynamic_config._initially_unsatisfied_deps.append(dep)
3728 # An scheduled installation broke a deep dependency.
3729 # Add the installed package to the graph so that it
3730 # will be appropriately reported as a slot collision
3731 # (possibly solvable via backtracking).
3732 pkg = matches[-1] # highest match
3733 if not self._add_pkg(pkg, dep):
3735 if not self._create_graph(allow_unsatisfied=True):
3739 def _pkg(self, cpv, type_name, root_config, installed=False,
# (Docstring body below; quote lines are elided from this view.)
3742 Get a package instance from the cache, or create a new
3743 one if necessary. Raises PackageNotFound from aux_get if it
3744 failures for some reason (package does not exist or is
# Installed and onlydeps packages are never merged.
3748 if installed or onlydeps:
3749 operation = "nomerge"
3750 # Ensure that we use the specially optimized RootConfig instance
3751 # that refers to FakeVartree instead of the real vartree.
3752 root_config = self._frozen_config.roots[root_config.root]
3753 pkg = self._frozen_config._pkg_cache.get(
3754 (type_name, root_config.root, cpv, operation))
3755 if pkg is None and onlydeps and not installed:
3756 # Maybe it already got pulled in as a "merge" node.
3757 pkg = self._dynamic_config.mydbapi[root_config.root].get(
3758 (type_name, root_config.root, cpv, 'merge'))
# Cache miss: build a new Package from the appropriate db's metadata.
3761 tree_type = self.pkg_tree_map[type_name]
3762 db = root_config.trees[tree_type].dbapi
3763 db_keys = list(self._frozen_config._trees_orig[root_config.root][
3764 tree_type].dbapi._aux_cache_keys)
3766 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
3768 raise portage.exception.PackageNotFound(cpv)
3769 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
3770 installed=installed, metadata=metadata, onlydeps=onlydeps,
3771 root_config=root_config, type_name=type_name)
3772 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest version per slot that is masked only by LICENSE,
# for later autounmask reporting.
3774 if not self._pkg_visibility_check(pkg) and \
3775 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
3776 slot_key = (pkg.root, pkg.slot_atom)
3777 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
3778 if other_pkg is None or pkg > other_pkg:
3779 self._frozen_config._highest_license_masked[slot_key] = pkg
3783 def _validate_blockers(self):
3784 """Remove any blockers from the digraph that do not match any of the
3785 packages within the graph. If necessary, create hard deps to ensure
3786 correct merge order such that mutually blocking packages are never
3787 installed simultaneously."""
3789 if "--buildpkgonly" in self._frozen_config.myopts or \
3790 "--nodeps" in self._frozen_config.myopts:
3793 complete = "complete" in self._dynamic_config.myparams
3794 deep = "deep" in self._dynamic_config.myparams
3797 # Pull in blockers from all installed packages that haven't already
3798 # been pulled into the depgraph. This is not enabled by default
3799 # due to the performance penalty that is incurred by all the
3800 # additional dep_check calls that are required.
3802 # For installed packages, always ignore blockers from DEPEND since
3803 # only runtime dependencies should be relevant for packages that
3804 # are already built.
3805 dep_keys = ["RDEPEND", "PDEPEND"]
3806 for myroot in self._frozen_config.trees:
3807 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
3808 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
3809 pkgsettings = self._frozen_config.pkgsettings[myroot]
3810 root_config = self._frozen_config.roots[myroot]
3811 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
3812 final_db = self._dynamic_config.mydbapi[myroot]
3814 blocker_cache = BlockerCache(myroot, vardb)
3815 stale_cache = set(blocker_cache)
3818 stale_cache.discard(cpv)
3819 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
3821 pkg in self._dynamic_config._traversed_pkg_deps
3823 # Check for masked installed packages. Only warn about
3824 # packages that are in the graph in order to avoid warning
3825 # about those that will be automatically uninstalled during
3826 # the merge process or by --depclean. Always warn about
3827 # packages masked by license, since the user likely wants
3828 # to adjust ACCEPT_LICENSE.
3830 if not self._pkg_visibility_check(pkg) and \
3831 (pkg_in_graph or 'LICENSE' in pkg.masks):
3832 self._dynamic_config._masked_installed.add(pkg)
3834 self._check_masks(pkg)
3836 blocker_atoms = None
3842 self._dynamic_config._blocker_parents.child_nodes(pkg))
3847 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
3851 # Select just the runtime blockers.
3852 blockers = [blocker for blocker in blockers \
3853 if blocker.priority.runtime or \
3854 blocker.priority.runtime_post]
3855 if blockers is not None:
3856 blockers = set(blocker.atom for blocker in blockers)
3858 # If this node has any blockers, create a "nomerge"
3859 # node for it so that they can be enforced.
3860 self._spinner_update()
3861 blocker_data = blocker_cache.get(cpv)
3862 if blocker_data is not None and \
3863 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3866 # If blocker data from the graph is available, use
3867 # it to validate the cache and update the cache if
3869 if blocker_data is not None and \
3870 blockers is not None:
3871 if not blockers.symmetric_difference(
3872 blocker_data.atoms):
3876 if blocker_data is None and \
3877 blockers is not None:
3878 # Re-use the blockers from the graph.
3879 blocker_atoms = sorted(blockers)
3880 counter = long(pkg.metadata["COUNTER"])
3882 blocker_cache.BlockerData(counter, blocker_atoms)
3883 blocker_cache[pkg.cpv] = blocker_data
3887 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
3889 # Use aux_get() to trigger FakeVartree global
3890 # updates on *DEPEND when appropriate.
3891 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3892 # It is crucial to pass in final_db here in order to
3893 # optimize dep_check calls by eliminating atoms via
3894 # dep_wordreduce and dep_eval calls.
3896 success, atoms = portage.dep_check(depstr,
3897 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
3898 trees=self._dynamic_config._graph_trees, myroot=myroot)
3901 except Exception as e:
3902 # This is helpful, for example, if a ValueError
3903 # is thrown from cpv_expand due to multiple
3904 # matches (this can happen if an atom lacks a
3906 show_invalid_depstring_notice(
3907 pkg, depstr, str(e))
3911 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3912 if replacement_pkg and \
3913 replacement_pkg[0].operation == "merge":
3914 # This package is being replaced anyway, so
3915 # ignore invalid dependencies so as not to
3916 # annoy the user too much (otherwise they'd be
3917 # forced to manually unmerge it first).
3919 show_invalid_depstring_notice(pkg, depstr, atoms)
3921 blocker_atoms = [myatom for myatom in atoms \
3923 blocker_atoms.sort()
3924 counter = long(pkg.metadata["COUNTER"])
3925 blocker_cache[cpv] = \
3926 blocker_cache.BlockerData(counter, blocker_atoms)
3929 for atom in blocker_atoms:
3930 blocker = Blocker(atom=atom,
3931 eapi=pkg.metadata["EAPI"],
3932 priority=self._priority(runtime=True),
3934 self._dynamic_config._blocker_parents.add(blocker, pkg)
3935 except portage.exception.InvalidAtom as e:
3936 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3937 show_invalid_depstring_notice(
3938 pkg, depstr, "Invalid Atom: %s" % (e,))
3940 for cpv in stale_cache:
3941 del blocker_cache[cpv]
3942 blocker_cache.flush()
3945 # Discard any "uninstall" tasks scheduled by previous calls
3946 # to this method, since those tasks may not make sense given
3947 # the current graph state.
3948 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
3949 if previous_uninstall_tasks:
3950 self._dynamic_config._blocker_uninstalls = digraph()
3951 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
3953 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
3954 self._spinner_update()
3955 root_config = self._frozen_config.roots[blocker.root]
3956 virtuals = root_config.settings.getvirtuals()
3957 myroot = blocker.root
3958 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
3959 final_db = self._dynamic_config.mydbapi[myroot]
3961 provider_virtual = False
3962 if blocker.cp in virtuals and \
3963 not self._have_new_virt(blocker.root, blocker.cp):
3964 provider_virtual = True
3966 # Use this to check PROVIDE for each matched package
3968 atom_set = InternalPackageSet(
3969 initial_atoms=[blocker.atom])
3971 if provider_virtual:
3973 for provider_entry in virtuals[blocker.cp]:
3974 atoms.append(Atom(blocker.atom.replace(
3975 blocker.cp, provider_entry.cp, 1)))
3977 atoms = [blocker.atom]
3979 blocked_initial = set()
3981 for pkg in initial_db.match_pkgs(atom):
3982 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3983 blocked_initial.add(pkg)
3985 blocked_final = set()
3987 for pkg in final_db.match_pkgs(atom):
3988 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3989 blocked_final.add(pkg)
3991 if not blocked_initial and not blocked_final:
3992 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
3993 self._dynamic_config._blocker_parents.remove(blocker)
3994 # Discard any parents that don't have any more blockers.
3995 for pkg in parent_pkgs:
3996 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3997 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3998 self._dynamic_config._blocker_parents.remove(pkg)
4000 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4001 unresolved_blocks = False
4002 depends_on_order = set()
4003 for pkg in blocked_initial:
4004 if pkg.slot_atom == parent.slot_atom and \
4005 not blocker.atom.blocker.overlap.forbid:
4006 # New !!atom blockers do not allow temporary
4007 # simulaneous installation, so unlike !atom
4008 # blockers, !!atom blockers aren't ignored
4009 # when they match other packages occupying
4012 if parent.installed:
4013 # Two currently installed packages conflict with
4014 # eachother. Ignore this case since the damage
4015 # is already done and this would be likely to
4016 # confuse users if displayed like a normal blocker.
4019 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4021 if parent.operation == "merge":
4022 # Maybe the blocked package can be replaced or simply
4023 # unmerged to resolve this block.
4024 depends_on_order.add((pkg, parent))
4026 # None of the above blocker resolutions techniques apply,
4027 # so apparently this one is unresolvable.
4028 unresolved_blocks = True
4029 for pkg in blocked_final:
4030 if pkg.slot_atom == parent.slot_atom and \
4031 not blocker.atom.blocker.overlap.forbid:
4032 # New !!atom blockers do not allow temporary
4033 # simulaneous installation, so unlike !atom
4034 # blockers, !!atom blockers aren't ignored
4035 # when they match other packages occupying
4038 if parent.operation == "nomerge" and \
4039 pkg.operation == "nomerge":
4040 # This blocker will be handled the next time that a
4041 # merge of either package is triggered.
4044 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4046 # Maybe the blocking package can be
4047 # unmerged to resolve this block.
4048 if parent.operation == "merge" and pkg.installed:
4049 depends_on_order.add((pkg, parent))
4051 elif parent.operation == "nomerge":
4052 depends_on_order.add((parent, pkg))
4054 # None of the above blocker resolutions techniques apply,
4055 # so apparently this one is unresolvable.
4056 unresolved_blocks = True
4058 # Make sure we don't unmerge any package that have been pulled
4060 if not unresolved_blocks and depends_on_order:
4061 for inst_pkg, inst_task in depends_on_order:
4062 if self._dynamic_config.digraph.contains(inst_pkg) and \
4063 self._dynamic_config.digraph.parent_nodes(inst_pkg):
4064 unresolved_blocks = True
4067 if not unresolved_blocks and depends_on_order:
4068 for inst_pkg, inst_task in depends_on_order:
4069 uninst_task = Package(built=inst_pkg.built,
4070 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4071 metadata=inst_pkg.metadata,
4072 operation="uninstall",
4073 root_config=inst_pkg.root_config,
4074 type_name=inst_pkg.type_name)
4075 # Enforce correct merge order with a hard dep.
4076 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
4077 priority=BlockerDepPriority.instance)
4078 # Count references to this blocker so that it can be
4079 # invalidated after nodes referencing it have been
4081 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
4082 if not unresolved_blocks and not depends_on_order:
4083 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
4084 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
4085 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
4086 self._dynamic_config._blocker_parents.remove(blocker)
4087 if not self._dynamic_config._blocker_parents.child_nodes(parent):
4088 self._dynamic_config._blocker_parents.remove(parent)
4089 if unresolved_blocks:
4090 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
	def _accept_blocker_conflicts(self):
		"""
		Tell whether the current option set makes unresolved blocker
		conflicts tolerable. Callers treat the result as a boolean (see
		the `not self._accept_blocker_conflicts()` checks in
		_serialize_tasks); the listed modes don't actually merge to the
		live filesystem, so blockers can be ignored for them.
		"""
		for x in ("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri", "--nodeps"):
			if x in self._frozen_config.myopts:
	def _merge_order_bias(self, mygraph):
		"""
		For optimal leaf node selection, promote deep system runtime deps and
		order nodes from highest to lowest overall reference count.
		"""
		# node_info maps each node to its parent (reverse-dependency) count.
		for node in mygraph.order:
			node_info[node] = len(mygraph.parent_nodes(node))
		deep_system_deps = _find_deep_system_runtime_deps(mygraph)
		def cmp_merge_preference(node1, node2):
			# cmp-style comparator, adapted for key-based sorting via
			# cmp_sort_key below.
			if node1.operation == 'uninstall':
				if node2.operation == 'uninstall':
			if node2.operation == 'uninstall':
				if node1.operation == 'uninstall':
			node1_sys = node1 in deep_system_deps
			node2_sys = node2 in deep_system_deps
			if node1_sys != node2_sys:
			# Fall back to reference count: more-referenced nodes sort
			# earlier (descending parent count).
			return node_info[node2] - node_info[node1]
		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
	def altlist(self, reversed=False):
		"""
		Serialize tasks if no cached result exists yet, then build retlist
		as a copy of the serialized tasks cache. Conflicts are resolved and
		serialization retried until _serialize_tasks succeeds.
		"""
		# NOTE(review): the "reversed" parameter shadows the builtin of the
		# same name inside this method — consider renaming in a future API
		# revision (keyword compatibility permitting).
		while self._dynamic_config._serialized_tasks_cache is None:
			self._resolve_conflicts()
				self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
					self._serialize_tasks()
			except self._serialize_tasks_retry:
		# Copy, so that callers can't mutate the cached list.
		retlist = self._dynamic_config._serialized_tasks_cache[:]
	def _implicit_libc_deps(self, mergelist, graph):
		"""
		Create implicit dependencies on libc, in order to ensure that libc
		is installed as early as possible (see bug #303567).
		"""
		implicit_libc_roots = (self._frozen_config._running_root.root,)
		for root in implicit_libc_roots:
			graphdb = self._dynamic_config.mydbapi[root]
			vardb = self._frozen_config.trees[root]["vartree"].dbapi
			for atom in self._expand_virt_from_graph(root,
				portage.const.LIBC_PACKAGE_ATOM):
				# Collect libc providers that are scheduled for merge and
				# are not already installed (absent from the vardb).
				match = graphdb.match_pkgs(atom)
				if pkg.operation == "merge" and \
					not vardb.cpv_exists(pkg.cpv):
					libc_pkgs.setdefault(pkg.root, set()).add(pkg)
		earlier_libc_pkgs = set()
		for pkg in mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			root_libc_pkgs = libc_pkgs.get(pkg.root)
			if root_libc_pkgs is not None and \
				pkg.operation == "merge":
				if pkg in root_libc_pkgs:
					earlier_libc_pkgs.add(pkg)
					# Add a buildtime dep edge so each libc pkg already seen
					# earlier in mergelist is merged before this package.
					for libc_pkg in root_libc_pkgs:
						if libc_pkg in earlier_libc_pkgs:
							graph.add(libc_pkg, pkg,
								priority=DepPriority(buildtime=True))
	def schedulerGraph(self):
		"""
		The scheduler graph is identical to the normal one except that
		uninstall edges are reversed in specific cases that require
		conflicting packages to be temporarily installed simultaneously.
		This is intended for use by the Scheduler in it's parallelization
		logic. It ensures that temporary simultaneous installation of
		conflicting packages is avoided when appropriate (especially for
		!!atom blockers), but allowed in specific cases that require it.

		Note that this method calls break_refs() which alters the state of
		internal Package instances such that this depgraph instance should
		not be used to perform any more calculations.
		"""
		# NOTE: altlist initializes self._dynamic_config._scheduler_graph
		mergelist = self.altlist()
		self._implicit_libc_deps(mergelist,
			self._dynamic_config._scheduler_graph)
		# Break DepPriority.satisfied attributes which reference
		# installed Package instances.
		for parents, children, node in \
			self._dynamic_config._scheduler_graph.nodes.values():
			for priorities in chain(parents.values(), children.values()):
				for priority in priorities:
					if priority.satisfied:
						# Replace the Package reference with a plain True,
						# breaking the reference while keeping truthiness.
						priority.satisfied = True
		pkg_cache = self._frozen_config._pkg_cache
		graph = self._dynamic_config._scheduler_graph
		trees = self._frozen_config.trees
		pruned_pkg_cache = {}
		# Keep only packages that are in the graph or still installed.
		for pkg in pkg_cache:
			if pkg in graph or \
				(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
				pruned_pkg_cache[pkg] = pkg
			trees[root]['vartree']._pkg_cache = pruned_pkg_cache
			_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
4241 def break_refs(self):
4243 Break any references in Package instances that lead back to the depgraph.
4244 This is useful if you want to hold references to packages without also
4245 holding the depgraph on the heap. It should only be called after the
4246 depgraph and _frozen_config will not be used for any more calculations.
4248 for root_config in self._frozen_config.roots.values():
4249 root_config.update(self._frozen_config._trees_orig[
4250 root_config.root]["root_config"])
4251 # Both instances are now identical, so discard the
4252 # original which should have no other references.
4253 self._frozen_config._trees_orig[
4254 root_config.root]["root_config"] = root_config
4256 def _resolve_conflicts(self):
4257 if not self._complete_graph():
4258 raise self._unknown_internal_error()
4260 if not self._validate_blockers():
4261 self._dynamic_config._skip_restart = True
4262 raise self._unknown_internal_error()
4264 if self._dynamic_config._slot_collision_info:
4265 self._process_slot_conflicts()
	def _serialize_tasks(self):
		"""
		Flatten the dependency digraph into an ordered task list.

		Returns a (retlist, scheduler_graph) tuple, where retlist is the
		ordered list of merge/uninstall tasks (plus Blocker markers) and
		scheduler_graph is a copy of the digraph adjusted for blocker-driven
		uninstalls. Leaf nodes are selected repeatedly, with special
		handling for asap nodes (portage itself, libc), blocker-driven
		uninstalls and circular-dependency fallbacks.

		Raises _serialize_tasks_retry when the graph must be rebuilt in
		"complete" mode, and _unknown_internal_error on unresolved circular
		deps, unsolvable blockers, or slot collisions.
		"""
		if "--debug" in self._frozen_config.myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self._dynamic_config.digraph.debug_print()
			writemsg("\n", noiselevel=-1)
		scheduler_graph = self._dynamic_config.digraph.copy()
		if '--nodeps' in self._frozen_config.myopts:
			# Preserve the package order given on the command line.
			return ([node for node in scheduler_graph \
				if isinstance(node, Package) \
				and node.operation == 'merge'], scheduler_graph)
		# Work on a private copy, since nodes are removed from mygraph as
		# they are scheduled (difference_update below).
		mygraph=self._dynamic_config.digraph.copy()
		removed_nodes = set()
		# Prune off all DependencyArg instances since they aren't
		# needed, and because of nested sets this is faster than doing
		# it with multiple digraph.root_nodes() calls below. This also
		# takes care of nested sets that have circular references,
		# which wouldn't be matched by digraph.root_nodes().
		for node in mygraph:
			if isinstance(node, DependencyArg):
				removed_nodes.add(node)
		mygraph.difference_update(removed_nodes)
		removed_nodes.clear()
		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
			for node in mygraph.root_nodes():
				if not isinstance(node, Package) or \
					node.installed or node.onlydeps:
					removed_nodes.add(node)
			self._spinner_update()
			mygraph.difference_update(removed_nodes)
			if not removed_nodes:
			removed_nodes.clear()
		self._merge_order_bias(mygraph)
		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:
		myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		complete = "complete" in self._dynamic_config.myparams
		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]
		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._frozen_config._running_root.root
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)
			running_portage = running_portage[0]
			running_portage = None
		if replacement_portage:
			replacement_portage = replacement_portage[0]
			replacement_portage = None
		if replacement_portage == running_portage:
			replacement_portage = None
		if replacement_portage is not None and \
			(running_portage is None or \
			running_portage.cpv != replacement_portage.cpv or \
			'9999' in replacement_portage.cpv or \
			'git' in replacement_portage.inherited or \
			'git-2' in replacement_portage.inherited):
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)
		if running_portage is not None:
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=self._pkg_use_enabled(running_portage),
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString as e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				portage_rdepend = {running_portage : []}
			for atoms in portage_rdepend.values():
				runtime_deps.update(atom for atom in atoms \
					if not atom.blocker)
		# Merge libc asap, in order to account for implicit
		# dependencies. See bug #303567.
		implicit_libc_roots = (running_root,)
		for root in implicit_libc_roots:
			vardb = self._frozen_config.trees[root]["vartree"].dbapi
			graphdb = self._dynamic_config.mydbapi[root]
			for atom in self._expand_virt_from_graph(root,
				portage.const.LIBC_PACKAGE_ATOM):
				match = graphdb.match_pkgs(atom)
				if pkg.operation == "merge" and \
					not vardb.cpv_exists(pkg.cpv):
			# If there's also an os-headers upgrade, we need to
			# pull that in first. See bug #328317.
			for atom in self._expand_virt_from_graph(root,
				portage.const.OS_HEADERS_PACKAGE_ATOM):
				match = graphdb.match_pkgs(atom)
				if pkg.operation == "merge" and \
					not vardb.cpv_exists(pkg.cpv):
					asap_nodes.append(pkg)
			asap_nodes.extend(libc_pkgs)
		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
			if node not in mergeable_nodes:
			if node == replacement_portage and \
				mygraph.child_nodes(node,
					ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):
		def ignore_uninst_or_med(priority):
			# Like priority_range.ignore_medium, but also ignores
			# blocker-uninstall edges.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium(priority)
		def ignore_uninst_or_med_soft(priority):
			# Like priority_range.ignore_medium_soft, but also ignores
			# blocker-uninstall edges.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium_soft(priority)
		tree_mode = "--tree" in self._frozen_config.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available. This is set to False when the previous iteration
		# failed to select any nodes. It is reset whenever nodes are
		# successfully selected.
		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False
		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		# iteration prefer_asap drop_satisfied
		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.
		while not mygraph.empty():
			self._spinner_update()
			selected_nodes = None
			ignore_priority = None
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=priority_range.ignore_soft):
						selected_nodes = [node]
						asap_nodes.remove(node)
			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				for i in range(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)
						# If there is a mixture of merges and uninstalls,
						# do the uninstalls first.
							good_uninstalls = []
								if node.operation == "uninstall":
									good_uninstalls.append(node)
								nodes = good_uninstalls
						if ignore_priority is None and not tree_mode:
							# Greedily pop all of these nodes since no
							# relationship has been ignored. This optimization
							# destroys --tree output, so it's disabled in tree
							selected_nodes = nodes
							# For optimal merge order:
							# * Only pop one node.
							# * Removing a root node (node without a parent)
							#   will not produce a leaf node, so avoid it.
							# * It's normal for a selected uninstall to be a
							#   root node, so don't check them for parents.
								if node.operation == "uninstall" or \
									mygraph.parent_nodes(node):
									selected_nodes = [node]
			if not selected_nodes:
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
					for i in range(priority_range.SOFT,
						priority_range.MEDIUM_SOFT + 1):
						ignore_priority = priority_range.ignore_priority[i]
							if not mygraph.parent_nodes(node):
							selected_nodes = set()
							if gather_deps(ignore_priority,
								mergeable_nodes, selected_nodes, node):
								selected_nodes = None
				if prefer_asap and asap_nodes and not selected_nodes:
					# We failed to find any asap nodes to merge, so ignore
					# them for the next iteration.
			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
							DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
						if child in asap_nodes:
						asap_nodes.append(child)
			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.
					priority_range = DepPrioritySatisfiedRange
					priority_range = DepPriorityNormalRange
				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)
				min_parent_deps = None
				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).
					if task in ignored_uninstall_tasks:
					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.
					root_config = self._frozen_config.roots[task.root]
					inst_pkg = self._pkg(task.cpv, "installed", root_config,
					if self._dynamic_config.digraph.contains(inst_pkg):
					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						if not eapi_has_strong_blocks(blocker.eapi):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
					if forbid_overlap and running_root == task.root:
					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:
							if other_version is None:
						# For packages in the system set, don't take
						# any chances. If the conflict can't be resolved
						# by a normal replacement operation then abort.
							for atom in root_config.sets[
								"system"].iterAtomsForPackage(task):
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.
						# For packages in the world set, go ahead an uninstall
						# when necessary, as long as the atom will be satisfied
						# in the final state.
						graph_db = self._dynamic_config.mydbapi[task.root]
							for atom in root_config.sets[
								"selected"].iterAtomsForPackage(task):
								for pkg in graph_db.match_pkgs(atom):
									self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					self._spinner_update()
					mergeable_parent = False
					parent_deps.add(task)
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if min_parent_deps is not None and \
							len(parent_deps) >= min_parent_deps:
							# This task is no better than a previously selected
							# task, so abort search now in order to avoid wasting
							# any more cpu time on this task. This increases
							# performance dramatically in cases when there are
							# hundreds of blockers to solve, like when
							# upgrading to a new slot of kde-meta.
							mergeable_parent = None
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
							mergeable_nodes, set(), parent):
							mergeable_parent = True
					if not mergeable_parent:
					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)
					if uninst_task is not None and min_parent_deps == 1:
						# This is the best possible result, so so abort search
						# now in order to avoid wasting any more cpu time.
				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)
					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
					# Sometimes a merge node will render an uninstall
					# node unnecessary (due to occupying the same SLOT),
					# and we want to avoid executing a separate uninstall
					# task in that case.
					slot_node = self._dynamic_config.mydbapi[uninst_task.root
						].match_pkgs(uninst_task.slot_atom)
						slot_node[0].operation == "merge":
						mygraph.add(slot_node[0], uninst_task,
							priority=BlockerDepPriority.instance)
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False
			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()
			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True
			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				for node in myblocker_uninstalls.leaf_nodes():
						mygraph.remove(node)
						ignored_uninstall_tasks.add(node)
				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False
			if not selected_nodes:
				self._dynamic_config._circular_deps_for_display = mygraph
				self._dynamic_config._skip_restart = True
				raise self._unknown_internal_error()
			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			drop_satisfied = False
			mygraph.difference_update(selected_nodes)
			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":
				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True
					vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
					inst_pkg = vardb.match_pkgs(node.slot_atom)
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						inst_pkg = inst_pkg[0]
						uninst_task = Package(built=inst_pkg.built,
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
							metadata=inst_pkg.metadata,
							operation="uninstall",
							root_config=inst_pkg.root_config,
							type_name=inst_pkg.type_name)
							mygraph.remove(uninst_task)
				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
								self._dynamic_config._unsolvable_blockers:
								solved_blockers.add(blocker)
				retlist.append(node)
				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(blocker)
		unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)
		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not unsolvable_blockers:
			self._dynamic_config.myparams["complete"] = True
			if '--debug' in self._frozen_config.myopts:
				msg.append("enabling 'complete' depgraph mode " + \
					"due to uninstall task(s):")
				for node in retlist:
					if isinstance(node, Package) and \
						node.operation == 'uninstall':
						msg.append("\t%s" % (node,))
				writemsg_level("\n%s\n" % \
					"".join("%s\n" % line for line in msg),
					level=logging.DEBUG, noiselevel=-1)
			raise self._serialize_tasks_retry("")
		# Set satisfied state on blockers, but not before the
		# above retry path, since we don't want to modify the
		# state in that case.
		for node in retlist:
			if isinstance(node, Blocker):
				node.satisfied = True
		for blocker in unsolvable_blockers:
			retlist.append(blocker)
		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			self._dynamic_config._skip_restart = True
			raise self._unknown_internal_error()
		if self._dynamic_config._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()
		return retlist, scheduler_graph
    def _show_circular_deps(self, mygraph):
        """
        Report a circular-dependency failure for mygraph: force verbose
        --tree output, display the affected merge list, then print the
        circular_dependency_handler's analysis and any suggested changes
        for breaking the cycle(s).

        @param mygraph: digraph containing the dependency cycle(s)
        """
        self._dynamic_config._circular_dependency_handler = \
            circular_dependency_handler(self, mygraph)
        handler = self._dynamic_config._circular_dependency_handler

        # Force verbose tree output so the cycle is visible in the
        # displayed merge list.
        self._frozen_config.myopts.pop("--quiet", None)
        self._frozen_config.myopts["--verbose"] = True
        self._frozen_config.myopts["--tree"] = True
        portage.writemsg("\n\n", noiselevel=-1)
        self.display(handler.merge_list)
        prefix = colorize("BAD", " * ")
        portage.writemsg("\n", noiselevel=-1)
        portage.writemsg(prefix + "Error: circular dependencies:\n",
        portage.writemsg("\n", noiselevel=-1)

        # Dump the raw cycle analysis when no human-readable message is
        # available, or when --debug was requested.
        if handler.circular_dep_message is None or \
            "--debug" in self._frozen_config.myopts:
            handler.debug_print()
            portage.writemsg("\n", noiselevel=-1)

        if handler.circular_dep_message is not None:
            portage.writemsg(handler.circular_dep_message, noiselevel=-1)

        suggestions = handler.suggestions
        # NOTE(review): guard lines appear to be elided from this view;
        # the suggestion output below is presumably conditional on a
        # non-empty suggestions list — confirm against full source.
        writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
        if len(suggestions) == 1:
            writemsg("by applying the following change:\n", noiselevel=-1)
        writemsg("by applying " + colorize("bold", "any of") + \
            " the following changes:\n", noiselevel=-1)
        writemsg("".join(suggestions), noiselevel=-1)
        writemsg("\nNote that this change can be reverted, once the package has" + \
            " been installed.\n", noiselevel=-1)

        if handler.large_cycle_count:
            writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
                "Several changes might be required to resolve all cycles.\n" + \
                "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)

        writemsg("\n\n", noiselevel=-1)
        writemsg(prefix + "Note that circular dependencies " + \
            "can often be avoided by temporarily\n", noiselevel=-1)
        writemsg(prefix + "disabling USE flags that trigger " + \
            "optional dependencies.\n", noiselevel=-1)
5021 def _show_merge_list(self):
5022 if self._dynamic_config._serialized_tasks_cache is not None and \
5023 not (self._dynamic_config._displayed_list and \
5024 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
5025 self._dynamic_config._displayed_list == \
5026 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
5027 display_list = self._dynamic_config._serialized_tasks_cache[:]
5028 if "--tree" in self._frozen_config.myopts:
5029 display_list.reverse()
5030 self.display(display_list)
    def _show_unsatisfied_blockers(self, blockers):
        """
        Report blockers that could not be resolved: display the merge
        list, then each conflicting package together with the parents
        that pulled it in (pruned to keep the output readable).

        @param blockers: iterable of unsatisfied Blocker nodes
        """
        self._show_merge_list()
        msg = "Error: The above package list contains " + \
            "packages which cannot be installed " + \
            "at the same time on the same system."
        prefix = colorize("BAD", " * ")
        portage.writemsg("\n", noiselevel=-1)
        for line in textwrap.wrap(msg, 70):
            portage.writemsg(prefix + line + "\n", noiselevel=-1)

        # Display the conflicting packages along with the packages
        # that pulled them in. This is helpful for troubleshooting
        # cases in which blockers don't solve automatically and
        # the reasons are not apparent from the normal merge list
        # display.
        # NOTE(review): initialization of conflict_pkgs (and, below,
        # pruned_pkgs/msg/indent/max_parents) is elided in this view.
        for blocker in blockers:
            for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
                self._dynamic_config._blocker_parents.parent_nodes(blocker)):
                parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
                if not parent_atoms:
                    # Fall back to the world-file atom that blocks pkg.
                    atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                    if atom is not None:
                        parent_atoms = set([("@selected", atom)])
                conflict_pkgs[pkg] = parent_atoms

        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        for pkg, parent_atoms in conflict_pkgs.items():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

        # Max number of parents shown, to avoid flooding the display.
        for pkg, parent_atoms in conflict_pkgs.items():
            # Prefer packages that are not directly involved in a conflict.
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                    parent, atom = parent_atom
                if parent not in conflict_pkgs:
                    pruned_list.add(parent_atom)
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                    pruned_list.add(parent_atom)
            omitted_parents = len(parent_atoms) - len(pruned_list)
            msg.append(indent + "%s pulled in by\n" % pkg)
            for parent_atom in pruned_list:
                parent, atom = parent_atom
                msg.append(2*indent)
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                # Display the specific atom from SetArg or
                # Package types.
                    msg.append("%s required by %s" % (atom, parent))
                msg.append(2*indent)
                msg.append("(and %d more)\n" % omitted_parents)

        writemsg("".join(msg), noiselevel=-1)

        if "--quiet" not in self._frozen_config.myopts:
            show_blocker_docs_link()
5126 def display(self, mylist, favorites=[], verbosity=None):
5128 # This is used to prevent display_problems() from
5129 # redundantly displaying this exact same merge list
5130 # again via _show_merge_list().
5131 self._dynamic_config._displayed_list = mylist
5133 return display(self, mylist, favorites, verbosity)
    def display_problems(self):
        """
        Display problems with the dependency graph such as slot collisions.
        This is called internally by display() to show the problems _after_
        the merge list where it is most likely to be seen, but if display()
        is not going to be called then this method should be called explicitly
        to ensure that the user is notified of problems with the graph.

        All output goes to stderr, except for unsatisfied dependencies which
        go to stdout for parsing by programs such as autounmask.
        """

        # Note that show_masked_packages() sends its output to
        # stdout, and some programs such as autounmask parse the
        # output in cases when emerge bails out. However, when
        # show_masked_packages() is called for installed packages
        # here, the message is a warning that is more appropriate
        # to send to stderr, so temporarily redirect stdout to
        # stderr. TODO: Fix output code so there's a cleaner way
        # to redirect everything to stderr.
        # NOTE(review): the try/finally that saves and restores
        # sys.stdout appears to be elided from this view.
        sys.stdout = sys.stderr
        self._display_problems()

        # This goes to stdout for parsing by programs like autounmask.
        for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
            self._show_unsatisfied_dep(*pargs, **kwargs)
    def _display_problems(self):
        """
        Emit all queued problem reports: circular dependencies,
        unsatisfied blockers or slot collisions, missed updates,
        autounmask configuration-change suggestions (keywords, USE,
        licenses), world-file problems, package.provided warnings and
        masked-package notices. All output conventions follow
        display_problems().
        """
        if self._dynamic_config._circular_deps_for_display is not None:
            self._show_circular_deps(
                self._dynamic_config._circular_deps_for_display)

        # The user is only notified of a slot conflict if
        # there are no unresolvable blocker conflicts.
        if self._dynamic_config._unsatisfied_blockers_for_display is not None:
            self._show_unsatisfied_blockers(
                self._dynamic_config._unsatisfied_blockers_for_display)
        elif self._dynamic_config._slot_collision_info:
            self._show_slot_collision_notice()
            self._show_missed_update()

        def check_if_latest(pkg):
            # Helper returning (is_latest, is_latest_in_slot) for pkg,
            # used below to choose between >=cat/pkg, >=cat/pkg:slot and
            # =cat/pkg atoms in the autounmask suggestions.
            # NOTE(review): several control-flow lines (break/else) of
            # this helper are elided from this view.
            is_latest_in_slot = True
            dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
            root_config = self._frozen_config.roots[pkg.root]

            for db, pkg_type, built, installed, db_keys in dbs:
                for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
                    if other_pkg.cp != pkg.cp:
                        # old-style PROVIDE virtual means there are no
                        # normal matches for this pkg_type
                        if other_pkg.slot_atom == pkg.slot_atom:
                            is_latest_in_slot = False
                    # iter_match_pkgs yields highest version first, so
                    # there's no need to search this pkg_type any further

                if not is_latest_in_slot:
                    return is_latest, is_latest_in_slot

        # Suggestions for ~arch keyword changes needed by autounmask.
        unstable_keyword_msg = []
        for pkg in self._dynamic_config._needed_unstable_keywords:
            self._show_merge_list()
            if pkg in self._dynamic_config.digraph:
                is_latest, is_latest_in_slot = check_if_latest(pkg)
                pkgsettings = self._frozen_config.pkgsettings[pkg.root]
                mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
                    use=self._pkg_use_enabled(pkg))
                for reason in mreasons:
                    if reason.unmask_hint and \
                        reason.unmask_hint.key == 'unstable keyword':
                        keyword = reason.unmask_hint.value

                        unstable_keyword_msg.append(self._get_dep_chain_as_comment(pkg))
                        unstable_keyword_msg.append(">=%s %s\n" % (pkg.cpv, keyword))
                        elif is_latest_in_slot:
                        unstable_keyword_msg.append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
                        unstable_keyword_msg.append("=%s %s\n" % (pkg.cpv, keyword))

        # Suggestions for package.use changes needed by autounmask.
        use_changes_msg = []
        for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
            self._show_merge_list()
            if pkg in self._dynamic_config.digraph:
                is_latest, is_latest_in_slot = check_if_latest(pkg)
                changes = needed_use_config_change[1]
                for flag, state in changes.items():
                        adjustments.append(flag)
                        adjustments.append("-" + flag)
                use_changes_msg.append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
                use_changes_msg.append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
                elif is_latest_in_slot:
                use_changes_msg.append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
                use_changes_msg.append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))

        # Suggestions for package.license changes needed by autounmask.
        for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
            self._show_merge_list()
            if pkg in self._dynamic_config.digraph:
                is_latest, is_latest_in_slot = check_if_latest(pkg)
                license_msg.append(self._get_dep_chain_as_comment(pkg))
                license_msg.append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
                elif is_latest_in_slot:
                license_msg.append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
                license_msg.append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))

        if unstable_keyword_msg:
            writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg_stdout("".join(unstable_keyword_msg), noiselevel=-1)

            writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg_stdout("".join(use_changes_msg), noiselevel=-1)

            writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg_stdout("".join(license_msg), noiselevel=-1)

        # TODO: Add generic support for "set problem" handlers so that
        # the below warnings aren't special cases for world only.

        if self._dynamic_config._missing_args:
            world_problems = False
            if "world" in self._dynamic_config.sets[
                self._frozen_config.target_root].sets:
                # Filter out indirect members of world (from nested sets)
                # since only direct members of world are desired here.
                world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
                for arg, atom in self._dynamic_config._missing_args:
                    if arg.name in ("selected", "world") and atom in world_set:
                        world_problems = True

                sys.stderr.write("\n!!! Problems have been " + \
                    "detected with your world file\n")
                sys.stderr.write("!!! Please run " + \
                    green("emaint --check world")+"\n\n")

        if self._dynamic_config._missing_args:
            sys.stderr.write("\n" + colorize("BAD", "!!!") + \
                " Ebuilds for the following packages are either all\n")
            sys.stderr.write(colorize("BAD", "!!!") + \
                " masked or don't exist:\n")
            sys.stderr.write(" ".join(str(atom) for arg, atom in \
                self._dynamic_config._missing_args) + "\n")

        if self._dynamic_config._pprovided_args:
            # Group the offending atoms by the argument(s) that pulled
            # them in, so each entry can name its referrers.
            for arg, atom in self._dynamic_config._pprovided_args:
                if isinstance(arg, SetArg):
                    arg_atom = (atom, atom)
                    arg_atom = (arg.arg, atom)
                refs = arg_refs.setdefault(arg_atom, [])
                if parent not in refs:
            msg.append(bad("\nWARNING: "))
            if len(self._dynamic_config._pprovided_args) > 1:
                msg.append("Requested packages will not be " + \
                    "merged because they are listed in\n")
                msg.append("A requested package will not be " + \
                    "merged because it is listed in\n")
            msg.append("package.provided:\n\n")
            problems_sets = set()
            for (arg, atom), refs in arg_refs.items():
                    problems_sets.update(refs)
                    ref_string = ", ".join(["'%s'" % name for name in refs])
                    ref_string = " pulled in by " + ref_string
                msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))

            if "selected" in problems_sets or "world" in problems_sets:
                msg.append("This problem can be solved in one of the following ways:\n\n")
                msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
                msg.append(" B) Uninstall offending packages (cleans them from world).\n")
                msg.append(" C) Remove offending entries from package.provided.\n\n")
                msg.append("The best course of action depends on the reason that an offending\n")
                msg.append("package.provided entry exists.\n\n")
            sys.stderr.write("".join(msg))

        # Updates masked by LICENSE changes.
        masked_packages = []
        for pkg in self._dynamic_config._masked_license_updates:
            root_config = pkg.root_config
            pkgsettings = self._frozen_config.pkgsettings[pkg.root]
            mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
            masked_packages.append((root_config, pkgsettings,
                pkg.cpv, pkg.metadata, mreasons))
            writemsg("\n" + colorize("BAD", "!!!") + \
                " The following updates are masked by LICENSE changes:\n",
            show_masked_packages(masked_packages)
            writemsg("\n", noiselevel=-1)

        # Installed packages that are now masked.
        masked_packages = []
        for pkg in self._dynamic_config._masked_installed:
            root_config = pkg.root_config
            pkgsettings = self._frozen_config.pkgsettings[pkg.root]
            mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
            masked_packages.append((root_config, pkgsettings,
                pkg.cpv, pkg.metadata, mreasons))
            writemsg("\n" + colorize("BAD", "!!!") + \
                " The following installed packages are masked:\n",
            show_masked_packages(masked_packages)
            writemsg("\n", noiselevel=-1)
    def saveNomergeFavorites(self):
        """Find atoms in favorites that are not in the mergelist and add them
        to the world file if necessary.

        Skipped entirely for modes that must not modify the world file
        (pretend/fetch-only/oneshot/etc.). NOTE(review): several
        control-flow lines (lock/unlock, continue/return) are elided
        from this view.
        """
        for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
            "--oneshot", "--onlydeps", "--pretend"):
            if x in self._frozen_config.myopts:
        root_config = self._frozen_config.roots[self._frozen_config.target_root]
        world_set = root_config.sets["selected"]

        world_locked = False
        if hasattr(world_set, "lock"):

        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        args_set = self._dynamic_config.sets[
            self._frozen_config.target_root].sets['__non_set_args__']
        portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
        added_favorites = set()
        for x in self._dynamic_config._set_nodes:
            # Only "nomerge" nodes for this root are candidates.
            if x.operation != "nomerge":
            if x.root != root_config.root:
                myfavkey = create_world_atom(x, args_set, root_config)
                if myfavkey in added_favorites:
                added_favorites.add(myfavkey)
            except portage.exception.InvalidDependString as e:
                writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                    (x.cpv, e), noiselevel=-1)
                writemsg("!!! see '%s'\n\n" % os.path.join(
                    x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)

        # Also record any user-supplied sets that qualify as world
        # candidates for this root.
        for arg in self._dynamic_config._initial_arg_list:
            if not isinstance(arg, SetArg):
            if arg.root_config.root != root_config.root:
            if k in ("selected", "world") or \
                not root_config.sets[k].world_candidate:
                all_added.append(SETPREFIX + k)
        all_added.extend(added_favorites)
            writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
                colorize("INFORM", str(a)), noiselevel=-1)
            world_set.update(all_added)
    def _loadResumeCommand(self, resume_data, skip_masked=True,
        """
        Add a resume command to the graph and validate it in the process. This
        will raise a PackageNotFound exception if a package is not available.

        @param resume_data: the "resume"/"resume_backup" dict from mtimedb,
            expected to contain "mergelist" (and optionally "favorites")
        @param skip_masked: when True, newly-masked merge tasks are
            collected instead of being reported as unsatisfied deps
        NOTE(review): parts of the signature and many control-flow lines
        are elided from this view.
        """

        if not isinstance(resume_data, dict):

        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):

        fakedb = self._dynamic_config.mydbapi
        trees = self._frozen_config.trees
        serialized_tasks = []
            # Each mergelist entry is [pkg_type, root, cpv, action].
            if not (isinstance(x, list) and len(x) == 4):
            pkg_type, myroot, pkg_key, action = x
            if pkg_type not in self.pkg_tree_map:
            if action != "merge":
            root_config = self._frozen_config.roots[myroot]
                pkg = self._pkg(pkg_key, pkg_type, root_config)
            except portage.exception.PackageNotFound:
                # It does not exist or it is corrupt.
                    # TODO: log these somewhere

            # Drop tasks matching --exclude atoms.
            if "merge" == pkg.operation and \
                self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
                modified_use=self._pkg_use_enabled(pkg)):

            if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
                    masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
                    self._dynamic_config._unsatisfied_deps_for_display.append(
                        ((pkg.root, "="+pkg.cpv), {"myparent":None}))

            fakedb[myroot].cpv_inject(pkg)
            serialized_tasks.append(pkg)
            self._spinner_update()

        if self._dynamic_config._unsatisfied_deps_for_display:

        if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
            self._dynamic_config._serialized_tasks_cache = serialized_tasks
            self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
            self._select_package = self._select_pkg_from_graph
            self._dynamic_config.myparams["selective"] = True
            # Always traverse deep dependencies in order to account for
            # potentially unsatisfied dependencies of installed packages.
            # This is necessary for correct --keep-going or --resume operation
            # in case a package from a group of circularly dependent packages
            # fails. In this case, a package which has recently been installed
            # may have an unsatisfied circular dependency (pulled in by
            # PDEPEND, for example). So, even though a package is already
            # installed, it may not have all of it's dependencies satisfied, so
            # it may not be usable. If such a package is in the subgraph of
            # deep depenedencies of a scheduled build, that build needs to
            # be cancelled. In order for this type of situation to be
            # recognized, deep traversal of dependencies is required.
            self._dynamic_config.myparams["deep"] = True

            favorites = resume_data.get("favorites")
            args_set = self._dynamic_config.sets[
                self._frozen_config.target_root].sets['__non_set_args__']
            if isinstance(favorites, list):
                args = self._load_favorites(favorites)

            for task in serialized_tasks:
                if isinstance(task, Package) and \
                    task.operation == "merge":
                    if not self._add_pkg(task, None):

            # Packages for argument atoms need to be explicitly
            # added via _add_pkg() so that they are included in the
            # digraph (needed at least for --tree display).
            for arg in self._expand_set_args(args, add_to_digraph=True):
                for atom in arg.pset.getAtoms():
                    pkg, existing_node = self._select_package(
                        arg.root_config.root, atom)
                    if existing_node is None and \
                        if not self._add_pkg(pkg, Dependency(atom=atom,
                            root=pkg.root, parent=arg)):

            # Allow unsatisfied deps here to avoid showing a masking
            # message for an unsatisfied dep that isn't necessarily
            # masked.
            if not self._create_graph(allow_unsatisfied=True):

            unsatisfied_deps = []
            for dep in self._dynamic_config._unsatisfied_deps:
                if not isinstance(dep.parent, Package):
                if dep.parent.operation == "merge":
                    unsatisfied_deps.append(dep)

                # For unsatisfied deps of installed packages, only account for
                # them if they are in the subgraph of dependencies of a package
                # which is scheduled to be installed.
                unsatisfied_install = False
                dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
                    node = dep_stack.pop()
                    if not isinstance(node, Package):
                    if node.operation == "merge":
                        unsatisfied_install = True
                    if node in traversed:
                    dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

                if unsatisfied_install:
                    unsatisfied_deps.append(dep)

            if masked_tasks or unsatisfied_deps:
                # This probably means that a required package
                # was dropped via --skipfirst. It makes the
                # resume list invalid, so convert it to a
                # UnsatisfiedResumeDep exception.
                raise self.UnsatisfiedResumeDep(self,
                    masked_tasks + unsatisfied_deps)
            self._dynamic_config._serialized_tasks_cache = None
            except self._unknown_internal_error:
    def _load_favorites(self, favorites):
        """
        Use a list of favorites to resume state from a
        previous select_files() call. This creates similar
        DependencyArg instances to those that would have
        been created by the original select_files() call.
        This allows Package instances to be matched with
        DependencyArg instances during graph creation.

        @param favorites: list of favorite strings (set names with
            SETPREFIX, or plain atoms)
        @return: list of SetArg/AtomArg instances (per full source;
            several control-flow lines are elided from this view)
        """
        root_config = self._frozen_config.roots[self._frozen_config.target_root]
        sets = root_config.sets
        depgraph_sets = self._dynamic_config.sets[root_config.root]
            # Ignore anything that isn't a string (py2 basestring check).
            if not isinstance(x, basestring):
            if x in ("system", "world"):
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                # Skip sets that are already registered.
                if s in depgraph_sets.sets:
                depgraph_sets.sets[s] = pset
                args.append(SetArg(arg=x, pset=pset,
                    root_config=root_config))
                except portage.exception.InvalidAtom:
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))

        self._set_args(args)
    class UnsatisfiedResumeDep(portage.exception.PortageException):
        """
        A dependency of a resume list is not installed. This
        can occur when a required package is dropped from the
        merge list via --skipfirst.
        """
        def __init__(self, depgraph, value):
            portage.exception.PortageException.__init__(self, value)
            # Keep a reference to the depgraph so the handler can
            # inspect the failed graph state.
            self.depgraph = depgraph
    class _internal_exception(portage.exception.PortageException):
        """
        Base class for exceptions used internally by the depgraph
        for control flow (see the subclasses below).
        """
        def __init__(self, value=""):
            portage.exception.PortageException.__init__(self, value)
    class _unknown_internal_error(_internal_exception):
        """
        Used by the depgraph internally to terminate graph creation.
        The specific reason for the failure should have been dumped
        to stderr, unfortunately, the exact reason for the failure
        is not available here.
        """
    class _serialize_tasks_retry(_internal_exception):
        """
        This is raised by the _serialize_tasks() method when it needs to
        be called again for some reason. The only case that it's currently
        used for is when neglected dependencies need to be added to the
        graph in order to avoid making a potentially unsafe decision.
        """
    class _backtrack_mask(_internal_exception):
        """
        This is raised by _show_unsatisfied_dep() when it's called with
        check_backtrack=True and a matching package has been masked by
        backtracking.
        """
5678 def need_restart(self):
5679 return self._dynamic_config._need_restart and \
5680 not self._dynamic_config._skip_restart
5682 def get_backtrack_parameters(self):
5684 "needed_unstable_keywords":
5685 self._dynamic_config._needed_unstable_keywords.copy(), \
5687 self._dynamic_config._runtime_pkg_mask.copy(),
5688 "needed_use_config_changes":
5689 self._dynamic_config._needed_use_config_changes.copy(),
5690 "needed_license_changes":
5691 self._dynamic_config._needed_license_changes.copy(),
class _dep_check_composite_db(dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    via dep_check().
    """
    def __init__(self, depgraph, root):
        dbapi.__init__(self)
        self._depgraph = depgraph
        # NOTE(review): assignment of self._root is elided from this
        # view, but _root is used throughout the methods below.
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Invalidate both the atom match cache and the cpv->Package map.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def cp_list(self, cp):
        """
        Emulate cp_list just so it can be used to check for existence
        of new-style virtuals. Since it's a waste of time to return
        more than one cpv for this use case, a maximum of one cpv will
        be returned.
        """
        if isinstance(cp, Atom):
            for pkg in self._depgraph._iter_match_pkgs_any(
                self._depgraph._frozen_config.roots[self._root], atom):

    def match(self, atom):
        # Cached atom matching built on the depgraph's package
        # selection. NOTE(review): cache hit/return and loop lines are
        # elided from this view.
        ret = self._match_cache.get(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
        slots.remove(pkg.metadata["SLOT"])
        # Select one visible package per remaining slot.
        slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
        self._cpv_pkg_map[pkg.cpv] = pkg
        self._cpv_sort_ascending(ret)
        self._match_cache[atom] = ret

    def _visible(self, pkg):
        # Decide whether pkg should be visible to dep_check().
        # NOTE(review): several return/guard lines are elided from
        # this view.
        if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
            arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
            except (StopIteration, portage.exception.InvalidDependString):
        if pkg.installed and \
            (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
            # Account for packages with masks (like KEYWORDS masks)
            # that are usually ignored in visibility checks for
            # installed packages, in order to handle cases like
            myopts = self._depgraph._frozen_config.myopts
            use_ebuild_visibility = myopts.get(
                '--use-ebuild-visibility', 'n') != 'n'
            avoid_update = "--update" not in myopts and \
                "remove" not in self._depgraph._dynamic_config.myparams
            usepkgonly = "--usepkgonly" in myopts
            if not avoid_update:
                if not use_ebuild_visibility and usepkgonly:
                    pkg_eb = self._depgraph._pkg(
                        pkg.cpv, "ebuild", pkg.root_config)
                    except portage.exception.PackageNotFound:
                    if not self._depgraph._pkg_visibility_check(pkg_eb):

        in_graph = self._depgraph._dynamic_config._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            # Note: highest_visible is not necessarily the real highest
            # visible, especially when --update is not enabled, so use
            # < operator instead of !=.
            if pkg < highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def aux_get(self, cpv, wants):
        # Serve metadata for a cpv previously returned by match();
        # missing keys yield empty strings.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]

    def match_pkgs(self, atom):
        # Like match(), but returns Package instances instead of cpvs.
        return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """
    Inform the user that the short ebuild name `arg` matched several
    packages (atoms). In --quiet mode, just list the fully-qualified
    names; otherwise run a search to show the candidates with details.
    """
    if "--quiet" in myopts:
        writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
        writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)

    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    # Derive the bare package name by inserting a dummy category.
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
    writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def insert_category_into_atom(atom, category):
    """
    Return a copy of the atom string with "category/" inserted just
    before the package name (i.e. before the first word character),
    or None when the atom contains no word character at all.

    Example: insert_category_into_atom(">=foo-1.0", "dev-libs")
    returns ">=dev-libs/foo-1.0".

    The visible code computed the result but never returned it, and
    would raise AttributeError for atoms without any word character;
    both issues are fixed here.
    """
    alphanum = re.search(r'\w', atom)
    if alphanum is None:
        # Nothing resembling a package name to qualify.
        return None
    pos = alphanum.start()
    return atom[:pos] + "%s/" % category + atom[pos:]
def _spinner_start(spinner, myopts):
    """
    Print the appropriate "These are the packages that would be ..."
    header for pretend/ask/tree/verbose modes, silence the spinner for
    quiet/--nodeps runs, and otherwise start the "Calculating
    dependencies" spinner. NOTE(review): several guard/else lines
    (including the action assignments) are elided from this view.
    """
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
        elif "--buildpkgonly" in myopts:
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            if "--unordered-display" in myopts:
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be %s:" % action) + "\n\n")
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be %s, in reverse order:" % action) + "\n\n")
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be %s, in order:" % action) + "\n\n")

    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        # Silence spinner output for quiet/--nodeps runs.
        spinner.update = spinner.update_quiet

        portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
    """
    Finish the "Calculating dependencies" spinner output with
    "... done!". Does nothing for a missing or quiet spinner
    (the early return is elided from this view).
    """
    if spinner is None or \
        spinner.update == spinner.update_quiet:

    if spinner.update != spinner.update_basic:
        # update_basic is used for non-tty output,
        # so don't output backspaces in that case.
        portage.writemsg_stdout("\b\b")

    portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	_spinner_start(spinner, myopts)
	try:
		return _backtrack_depgraph(settings, trees, myopts, myparams,
			myaction, myfiles, spinner)
	finally:
		# Always terminate the spinner, even when the dependency
		# calculation raises (e.g. PackageSetNotFound).
		_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""Build a depgraph, retrying with backtracking parameters on failure.

	Each failed attempt that requests a restart feeds its backtrack
	parameters into the next depgraph, up to --backtrack tries (default
	10).  When backtracking is exhausted, it is disabled and one plain
	calculation is done so a useful error message can be shown.

	@returns: (success, depgraph, favorites)
	"""
	backtrack_max = myopts.get('--backtrack', 10)
	backtrack_parameters = {}
	needed_unstable_keywords = None
	allow_backtracking = backtrack_max > 0
	backtracked = 0

	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)

	while True:
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=allow_backtracking,
			**backtrack_parameters)
		success, favorites = mydepgraph.select_files(myfiles)
		if not success:
			if mydepgraph.need_restart() and backtracked < backtrack_max:
				# Retry with the mask/keyword adjustments the failed
				# graph computed for us.
				backtrack_parameters = mydepgraph.get_backtrack_parameters()
				backtracked += 1
			elif backtracked and allow_backtracking:
				if "--debug" in myopts:
					writemsg_level(
						"\n\nbacktracking aborted after %s tries\n\n" % \
						backtracked, noiselevel=-1, level=logging.DEBUG)
				# Backtracking failed, so disable it and do
				# a plain dep calculation + error message.
				allow_backtracking = False
				# Don't reset needed_unstable_keywords here, since we don't
				# want to send the user through a "one step at a time"
				# unmasking session for no good reason.
				backtrack_parameters.pop('runtime_pkg_mask', None)
			else:
				break
		else:
			break
	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	_spinner_start(spinner, myopts)
	try:
		return _resume_depgraph(settings, trees, mtimedb, myopts,
			myparams, spinner)
	finally:
		# Stop the spinner even when resume-graph construction raises.
		_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.

	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner, frozen_config=frozen_config)
		try:
			success = mydepgraph._loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep as e:
			if not skip_unsatisfied:
				raise

			# Drop the packages with unsatisfied deps from the
			# mergelist, then retry with the pruned list.
			graph = mydepgraph._dynamic_config.digraph
			unsatisfied_parents = dict((dep.parent, dep.parent) \
				for dep in e.value)
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				pkg = unsatisfied_stack.pop()
				if pkg in traversed_nodes:
					continue
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(pkg):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
						continue
					unsatisfied = \
						graph.child_nodes(parent_node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
					if pkg in unsatisfied:
						unsatisfied_parents[parent_node] = parent_node
						unsatisfied_stack.append(parent_node)

			pruned_mergelist = []
			for x in mergelist:
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_parents:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update(pkg for pkg in \
				unsatisfied_parents if pkg.operation != "nomerge")

			# Release references before the next iteration's graph build.
			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, _pkg_use_enabled=None):
	"""Fetch metadata for cpv from db and compute its masking reasons.

	@returns: (metadata, mreasons) where metadata is the aux_get dict
		(or None if the db entry is unreadable) and mreasons is a list
		of mask reasons suitable for display
	"""
	try:
		metadata = dict(zip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# aux_get raises KeyError for a missing or corrupt entry.
		metadata = None

	if metadata is None:
		mreasons = ["corruption"]
	else:
		eapi = metadata['EAPI']
		if eapi[:1] == '-':
			# NOTE(review): leading "-" marks an invalid EAPI from the
			# cache layer -- strip it for display; confirm against the
			# metadata cache conventions.
			eapi = eapi[1:]
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
		else:
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)

			modified_use = None
			if _pkg_use_enabled is not None:
				modified_use = _pkg_use_enabled(pkg)

			mreasons = get_masking_status(pkg, pkgsettings, root_config, use=modified_use)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print the mask reasons for each entry of *masked_packages*.

	Each entry is a (root_config, pkgsettings, cpv, metadata, mreasons)
	tuple.  package.mask comments and license file locations are shown
	once each across all entries.

	@returns: True if any package was masked by an unsupported EAPI
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
			continue
		shown_cpvs.add(cpv)
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if metadata:
			if not portage.eapi_is_supported(metadata["EAPI"]):
				have_eapi_mask = True
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		writemsg_stdout("- "+cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)

		if comment and comment not in shown_comments:
			writemsg_stdout(filename + ":\n" + comment + "\n",
				noiselevel=-1)
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
				continue
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg_stdout(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Tell the user where to read more about masked packages."""
	lines = (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for line in lines:
		writemsg_stdout(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Point the user at the handbook section covering blocked packages."""
	messages = (
		"\nFor more information about " + bad("Blocked Packages") +
			", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for message in messages:
		writemsg(message, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, use=None):
	"""Return the human-readable message for each reason *pkg* is masked."""
	reasons = _get_masking_status(pkg, pkgsettings, root_config, use=use)
	return [reason.message for reason in reasons]
6144 def _get_masking_status(pkg, pkgsettings, root_config, use=None):
6146 mreasons = _getmaskingstatus(
6147 pkg, settings=pkgsettings,
6148 portdb=root_config.trees["porttree"].dbapi)
6150 if not pkg.installed:
6151 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
6152 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
6153 pkg.metadata["CHOST"]))
6156 for msg_type, msgs in pkg.invalid.items():
6159 _MaskReason("invalid", "invalid: %s" % (msg,)))
6161 if not pkg.metadata["SLOT"]:
6163 _MaskReason("invalid", "SLOT: undefined"))