1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
12 from itertools import chain
15 from portage import os
16 from portage import _unicode_decode
17 from portage.const import PORTAGE_PACKAGE_ATOM
18 from portage.dbapi import dbapi
19 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use
20 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
21 from portage.exception import InvalidAtom
22 from portage.output import colorize, create_color_func, \
24 bad = create_color_func("BAD")
25 from portage.package.ebuild.getmaskingstatus import \
26 _getmaskingstatus, _MaskReason
27 from portage._sets import SETPREFIX
28 from portage._sets.base import InternalPackageSet
29 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
30 from portage.util import writemsg_level
31 from portage.util.digraph import digraph
33 from _emerge.AtomArg import AtomArg
34 from _emerge.Blocker import Blocker
35 from _emerge.BlockerCache import BlockerCache
36 from _emerge.BlockerDepPriority import BlockerDepPriority
37 from _emerge.countdown import countdown
38 from _emerge.create_world_atom import create_world_atom
39 from _emerge.Dependency import Dependency
40 from _emerge.DependencyArg import DependencyArg
41 from _emerge.DepPriority import DepPriority
42 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
43 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
44 from _emerge.FakeVartree import FakeVartree
45 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
46 from _emerge.is_valid_package_atom import is_valid_package_atom
47 from _emerge.Package import Package
48 from _emerge.PackageArg import PackageArg
49 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
50 from _emerge.RootConfig import RootConfig
51 from _emerge.search import search
52 from _emerge.SetArg import SetArg
53 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
54 from _emerge.UnmergeDepPriority import UnmergeDepPriority
55 from _emerge.UseFlagDisplay import pkg_use_display
57 from _emerge.resolver.slot_collision import slot_conflict_handler
58 from _emerge.resolver.circular_dependency import circular_dependency_handler
59 from _emerge.resolver.output import display
61 if sys.hexversion >= 0x3000000:
65 class _scheduler_graph_config(object):
# Lightweight container handed to the scheduler: the trees, the shared
# Package cache, the dependency graph and the serialized merge list.
# NOTE(review): elided listing — original lines 67 and 69 (presumably the
# self.trees and self.graph assignments) are missing from this view.
66 def __init__(self, trees, pkg_cache, graph, mergelist):
68 self.pkg_cache = pkg_cache
70 self.mergelist = mergelist
72 class _frozen_depgraph_config(object):
# Immutable (per-emerge-invocation) depgraph state: settings, trees wrapped
# in FakeVartree instances, and option-derived data. Shared unchanged across
# backtracking runs, unlike _dynamic_depgraph_config.
# NOTE(review): elided listing — several original lines (73, 77-78, 80,
# 85-86, 88, 90, 92, 98, 108, 110, 113, 118) are missing from this view,
# including the loop header over roots and the try: that pairs with the
# except at original line 115.
74 def __init__(self, settings, trees, myopts, spinner):
75 self.settings = settings
76 self.target_root = settings["ROOT"]
79 if settings.get("PORTAGE_DEBUG", "") == "1":
81 self.spinner = spinner
82 self._running_root = trees["/"]["root_config"]
# Options under which a --skipfirst style restart must not happen.
83 self._opts_no_restart = frozenset(["--buildpkgonly",
84 "--fetchonly", "--fetch-all-uri", "--pretend"])
87 self._trees_orig = trees
89 # All Package instances
91 self._highest_license_masked = {}
93 self.trees[myroot] = {}
94 # Create a RootConfig instance that references
95 # the FakeVartree instead of the real one.
96 self.roots[myroot] = RootConfig(
97 trees[myroot]["vartree"].settings,
99 trees[myroot]["root_config"].setconfig)
100 for tree in ("porttree", "bintree"):
101 self.trees[myroot][tree] = trees[myroot][tree]
# The real vartree is replaced by a FakeVartree so that metadata updates
# and simulated vdb state do not touch the real /var/db/pkg.
102 self.trees[myroot]["vartree"] = \
103 FakeVartree(trees[myroot]["root_config"],
104 pkg_cache=self._pkg_cache,
105 pkg_root_config=self.roots[myroot])
106 self.pkgsettings[myroot] = portage.config(
107 clone=self.trees[myroot]["vartree"].settings)
109 self._required_set_names = set(["world"])
# --exclude arguments: bare names are promoted to */name wildcard atoms.
111 self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
112 for x in ' '.join(myopts.get("--exclude", [])).split():
114 x = Atom(x, allow_wildcard=True)
115 except portage.exception.InvalidAtom:
116 x = Atom("*/" + x, allow_wildcard=True)
117 self.excluded_pkgs.add(x)
119 class _depgraph_sets(object):
# Per-root bookkeeping for package sets pulled into the graph.
# NOTE(review): elided listing — the def __init__ line (original ~120/122)
# and the self.sets initialization are missing from this view.
121 # contains all sets added to the graph
123 # contains non-set atoms given as arguments
124 self.sets['__non_set_args__'] = InternalPackageSet()
125 # contains all atoms from all sets added to the graph, including
126 # atoms given as arguments
127 self.atoms = InternalPackageSet()
# Maps each atom back to the argument(s) it came from.
128 self.atom_arg_map = {}
130 class _dynamic_depgraph_config(object):
# Mutable per-backtracking-run depgraph state. A fresh instance is built for
# each backtracking attempt, seeded with copies of the masks / keyword /
# license / USE-change state accumulated by previous attempts.
# NOTE(review): elided listing — many original lines are missing from this
# view (e.g. 131, 141, 146, 155, 194, 200, 203, 206, 209, 211, 214, 216,
# 219, 223, 225, 231, 239, 241-242, 251-252, 256, 266, 276-277, 285, 290,
# 295, 301), including the self.sets/self.mydbapi initializations, the
# graph_tree/filtered_tree construction and the dbs list creation.
132 def __init__(self, depgraph, myparams, allow_backtracking,
133 runtime_pkg_mask, needed_unstable_keywords, needed_use_config_changes, needed_license_changes):
134 self.myparams = myparams.copy()
135 self._vdb_loaded = False
136 self._allow_backtracking = allow_backtracking
137 # Maps slot atom to package for each Package added to the graph.
138 self._slot_pkg_map = {}
139 # Maps nodes to the reasons they were selected for reinstallation.
140 self._reinstall_nodes = {}
142 # Contains a filtered view of preferred packages that are selected
143 # from available repositories.
144 self._filtered_trees = {}
145 # Contains installed packages and new packages that have been added
147 self._graph_trees = {}
148 # Caches visible packages returned from _select_package, for use in
149 # depgraph._iter_atoms_for_pkg() SLOT logic.
150 self._visible_pkgs = {}
151 # contains the args created by select_files
152 self._initial_arg_list = []
153 self.digraph = portage.digraph()
154 # manages sets added to the graph
156 # contains all nodes pulled in by self.sets
157 self._set_nodes = set()
158 # Contains only Blocker -> Uninstall edges
159 self._blocker_uninstalls = digraph()
160 # Contains only Package -> Blocker edges
161 self._blocker_parents = digraph()
162 # Contains only irrelevant Package -> Blocker edges
163 self._irrelevant_blockers = digraph()
164 # Contains only unsolvable Package -> Blocker edges
165 self._unsolvable_blockers = digraph()
166 # Contains all Blocker -> Blocked Package edges
167 self._blocked_pkgs = digraph()
168 # Contains world packages that have been protected from
169 # uninstallation but may not have been added to the graph
170 # if the graph is not complete yet.
171 self._blocked_world_pkgs = {}
172 # Contains packages whose dependencies have been traversed.
173 # This is used to check if we have accounted for blockers
174 # relevant to a package.
175 self._traversed_pkg_deps = set()
176 self._slot_collision_info = {}
177 # Slot collision nodes are not allowed to block other packages since
178 # blocker validation is only able to account for one package per slot.
179 self._slot_collision_nodes = set()
180 self._parent_atoms = {}
181 self._slot_conflict_parent_atoms = set()
182 self._slot_conflict_handler = None
183 self._circular_dependency_handler = None
184 self._serialized_tasks_cache = None
185 self._scheduler_graph = None
186 self._displayed_list = None
187 self._pprovided_args = []
188 self._missing_args = []
189 self._masked_installed = set()
190 self._masked_license_updates = set()
191 self._unsatisfied_deps_for_display = []
192 self._unsatisfied_blockers_for_display = None
193 self._circular_deps_for_display = None
195 self._dep_disjunctive_stack = []
196 self._unsatisfied_deps = []
197 self._initially_unsatisfied_deps = []
198 self._ignored_deps = []
199 self._highest_pkg_cache = {}
# Copy backtracking state so this run cannot mutate the caller's data.
201 if runtime_pkg_mask is None:
202 runtime_pkg_mask = {}
204 runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
205 runtime_pkg_mask.items())
207 if needed_unstable_keywords is None:
208 self._needed_unstable_keywords = set()
210 self._needed_unstable_keywords = needed_unstable_keywords.copy()
212 if needed_license_changes is None:
213 self._needed_license_changes = {}
215 self._needed_license_changes = needed_license_changes.copy()
217 if needed_use_config_changes is None:
218 self._needed_use_config_changes = {}
220 self._needed_use_config_changes = \
221 dict((k.copy(), (v[0].copy(), v[1].copy())) for (k, v) in \
222 needed_use_config_changes.items())
# NOTE(review): '== True' here only matches a boolean True stored in
# myopts; a string value would compare False — presumably intentional
# for this portage version, but verify against emerge option parsing.
224 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
226 self._runtime_pkg_mask = runtime_pkg_mask
227 self._need_restart = False
228 # For conditions that always require user intervention, such as
229 # unsatisfied REQUIRED_USE (currently has no autounmask support).
230 self._skip_restart = False
232 for myroot in depgraph._frozen_config.trees:
233 self.sets[myroot] = _depgraph_sets()
234 self._slot_pkg_map[myroot] = {}
235 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
236 # This dbapi instance will model the state that the vdb will
237 # have after new packages have been installed.
238 fakedb = PackageVirtualDbapi(vardb.settings)
240 self.mydbapi[myroot] = fakedb
243 graph_tree.dbapi = fakedb
244 self._graph_trees[myroot] = {}
245 self._filtered_trees[myroot] = {}
246 # Substitute the graph tree for the vartree in dep_check() since we
247 # want atom selections to be consistent with package selections
248 # that have already been made.
249 self._graph_trees[myroot]["porttree"] = graph_tree
250 self._graph_trees[myroot]["vartree"] = graph_tree
253 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
254 self._filtered_trees[myroot]["porttree"] = filtered_tree
255 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
257 # Passing in graph_tree as the vartree here could lead to better
258 # atom selections in some cases by causing atoms for packages that
259 # have been added to the graph to be preferred over other choices.
260 # However, it can trigger atom selections that result in
261 # unresolvable direct circular dependencies. For example, this
262 # happens with gwydion-dylan which depends on either itself or
263 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
264 # gwydion-dylan-bin needs to be selected in order to avoid
265 # an unresolvable direct circular dependency.
267 # To solve the problem described above, pass in "graph_db" so that
268 # packages that have been added to the graph are distinguishable
269 # from other available packages and installed packages. Also, pass
270 # the parent package into self._select_atoms() calls so that
271 # unresolvable direct circular dependencies can be detected and
272 # avoided when possible.
273 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
274 self._filtered_trees[myroot]["vartree"] = \
275 depgraph._frozen_config.trees[myroot]["vartree"]
# Build the ordered list of databases to consult during selection.
278 # (db, pkg_type, built, installed, db_keys)
279 if "remove" in self.myparams:
280 # For removal operations, use _dep_check_composite_db
281 # for availability and visibility checks. This provides
282 # consistency with install operations, so we don't
283 # get install/uninstall cycles like in bug #332719.
284 self._graph_trees[myroot]["porttree"] = filtered_tree
286 if "--usepkgonly" not in depgraph._frozen_config.myopts:
287 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
288 db_keys = list(portdb._aux_cache_keys)
289 dbs.append((portdb, "ebuild", False, False, db_keys))
291 if "--usepkg" in depgraph._frozen_config.myopts:
292 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
293 db_keys = list(bindb._aux_cache_keys)
294 dbs.append((bindb, "binary", True, False, db_keys))
296 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
297 db_keys = list(depgraph._frozen_config._trees_orig[myroot
298 ]["vartree"].dbapi._aux_cache_keys)
299 dbs.append((vardb, "installed", True, True, db_keys))
300 self._filtered_trees[myroot]["dbs"] = dbs
# Core dependency-graph builder for emerge. NOTE(review): elided listing —
# the class body continues beyond this view; only the header and two class
# attributes are visible here.
302 class depgraph(object):
304 pkg_tree_map = RootConfig.pkg_tree_map
# Dependency phases considered, in evaluation order.
306 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Build (or reuse) the frozen config and create a fresh dynamic config for
# this run. frozen_config is shared across backtracking runs; the remaining
# keyword arguments carry backtracking state forward.
# NOTE(review): elided listing — original lines 313 and 318 are missing
# (line 313 presumably completes the _frozen_depgraph_config(...) call).
308 def __init__(self, settings, trees, myopts, myparams, spinner,
309 frozen_config=None, runtime_pkg_mask=None, needed_unstable_keywords=None, \
310 needed_use_config_changes=None, needed_license_changes=None, allow_backtracking=False):
311 if frozen_config is None:
312 frozen_config = _frozen_depgraph_config(settings, trees,
314 self._frozen_config = frozen_config
315 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
316 allow_backtracking, runtime_pkg_mask, needed_unstable_keywords, \
317 needed_use_config_changes, needed_license_changes)
# Selection strategy hooks; replaceable (e.g. by _minimize_packages logic).
319 self._select_atoms = self._select_atoms_highest_available
320 self._select_package = self._select_pkg_highest_available
# NOTE(review): elided listing — the def line for this method (presumably
# def _load_vdb(self)) and its docstring quotes are missing from this view,
# along with several body lines (328-329, 331-332, 334, 337, 342-343, 348,
# 353-354, 359, 363, 366-367).
324 Load installed package metadata if appropriate. This used to be called
325 from the constructor, but that wasn't very nice since this procedure
326 is slow and it generates spinner output. So, now it's called on-demand
327 by various methods when necessary.
# Idempotent: bail out once the vdb has been loaded for this dynamic config.
330 if self._dynamic_config._vdb_loaded:
333 for myroot in self._frozen_config.trees:
335 preload_installed_pkgs = \
336 "--nodeps" not in self._frozen_config.myopts
338 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
339 if not fake_vartree.dbapi:
340 # This needs to be called for the first depgraph, but not for
341 # backtracking depgraphs that share the same frozen_config.
344 # FakeVartree.sync() populates virtuals, and we want
345 # self.pkgsettings to have them populated too.
346 self._frozen_config.pkgsettings[myroot] = \
347 portage.config(clone=fake_vartree.settings)
349 if preload_installed_pkgs:
350 vardb = fake_vartree.dbapi
351 fakedb = self._dynamic_config._graph_trees[
352 myroot]["vartree"].dbapi
355 self._spinner_update()
356 # This triggers metadata updates via FakeVartree.
357 vardb.aux_get(pkg.cpv, [])
358 fakedb.cpv_inject(pkg)
360 # Now that the vardb state is cached in our FakeVartree,
361 # we won't be needing the real vartree cache for awhile.
362 # To make some room on the heap, clear the vardbapi
364 self._frozen_config._trees_orig[myroot
365 ]["vartree"].dbapi._clear_cache()
368 self._dynamic_config._vdb_loaded = True
370 def _spinner_update(self):
371 if self._frozen_config.spinner:
372 self._frozen_config.spinner.update()
# Report updates that were skipped because they were runtime-masked during
# backtracking, keeping at most one (the highest seen) per (root, SLOT).
# NOTE(review): elided listing — original lines 375, 378, 381, 384,
# 388-389, 391-392, 394-395, 397-398, 403, 408, 411, 414 are missing,
# including the missed_updates dict initialization and the early returns.
374 def _show_missed_update(self):
376 # In order to minimize noise, show only the highest
377 # missed update from each SLOT.
379 for pkg, mask_reasons in \
380 self._dynamic_config._runtime_pkg_mask.items():
382 # Exclude installed here since we only
383 # want to show available updates.
385 k = (pkg.root, pkg.slot_atom)
386 if k in missed_updates:
387 other_pkg, mask_type, parent_atoms = missed_updates[k]
390 for mask_type, parent_atoms in mask_reasons.items():
393 missed_updates[k] = (pkg, mask_type, parent_atoms)
396 if not missed_updates:
# Group the survivors by the kind of mask that blocked them.
399 missed_update_types = {}
400 for pkg, mask_type, parent_atoms in missed_updates.values():
401 missed_update_types.setdefault(mask_type,
402 []).append((pkg, parent_atoms))
# In --quiet mode (without --debug), suppress the noisy categories.
404 if '--quiet' in self._frozen_config.myopts and \
405 '--debug' not in self._frozen_config.myopts:
406 missed_update_types.pop("slot conflict", None)
407 missed_update_types.pop("missing dependency", None)
409 self._show_missed_update_slot_conflicts(
410 missed_update_types.get("slot conflict"))
412 self._show_missed_update_unsatisfied_dep(
413 missed_update_types.get("missing dependency"))
# Display updates skipped due to unsatisfied dependencies; updates masked by
# backtracking are collected and shown in abbreviated form at the end.
# NOTE(review): elided listing — original lines 416, 418-419, 421, 423-424,
# 431-432, 435, 437, 440, 444-445, 453, 456 are missing, including the try:
# paired with the except at original line 428 and the early return.
415 def _show_missed_update_unsatisfied_dep(self, missed_updates):
417 if not missed_updates:
420 backtrack_masked = []
422 for pkg, parent_atoms in missed_updates:
425 for parent, root, atom in parent_atoms:
426 self._show_unsatisfied_dep(root, atom, myparent=parent,
427 check_backtrack=True)
428 except self._backtrack_mask:
429 # This is displayed below in abbreviated form.
430 backtrack_masked.append((pkg, parent_atoms))
433 writemsg("\n!!! The following update has been skipped " + \
434 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
436 writemsg(str(pkg.slot_atom), noiselevel=-1)
438 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
439 writemsg("\n", noiselevel=-1)
441 for parent, root, atom in parent_atoms:
442 self._show_unsatisfied_dep(root, atom, myparent=parent)
443 writemsg("\n", noiselevel=-1)
446 # These are shown in abbreviated form, in order to avoid terminal
447 # flooding from mask messages as reported in bug #285832.
448 writemsg("\n!!! The following update(s) have been skipped " + \
449 "due to unsatisfied dependencies\n" + \
450 "!!! triggered by backtracking:\n\n", noiselevel=-1)
451 for pkg, parent_atoms in backtrack_masked:
452 writemsg(str(pkg.slot_atom), noiselevel=-1)
454 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
455 writemsg("\n", noiselevel=-1)
# Display updates skipped due to slot-conflict masking, listing for each
# one the parent/atom pairs that pulled in the conflicting package.
# NOTE(review): elided listing — original lines 458, 460-462, 465-466, 469,
# 471-472, 474-476, 478, 484, 486, 488-490 are missing, including the msg
# list initialization and the early return.
457 def _show_missed_update_slot_conflicts(self, missed_updates):
459 if not missed_updates:
463 msg.append("\n!!! One or more updates have been skipped due to " + \
464 "a dependency conflict:\n\n")
467 for pkg, parent_atoms in missed_updates:
468 msg.append(str(pkg.slot_atom))
470 msg.append(" for %s" % (pkg.root,))
473 for parent, atom in parent_atoms:
477 msg.append(" conflicts with\n")
479 if isinstance(parent,
480 (PackageArg, AtomArg)):
481 # For PackageArg and AtomArg types, it's
482 # redundant to display the atom attribute.
483 msg.append(str(parent))
485 # Display the specific atom from SetArg or
487 msg.append("%s required by %s" % (atom, parent))
491 writemsg("".join(msg), noiselevel=-1)
# NOTE(review): elided listing — original lines 498-500, 502-503, 505, 508,
# 511, 513, 515-516, 518-520, 537, 541-542 are missing, including the
# docstring terminator, the early returns and the msg initializations.
493 def _show_slot_collision_notice(self):
494 """Show an informational message advising the user to mask one of
495 the packages. In some cases it may be possible to resolve this
496 automatically, but support for backtracking (removal nodes that have
497 already been selected) will be required in order to handle all possible
501 if not self._dynamic_config._slot_collision_info:
504 self._show_merge_list()
# Delegate conflict formatting to the dedicated handler.
506 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
507 handler = self._dynamic_config._slot_conflict_handler
509 conflict = handler.get_conflict()
510 writemsg(conflict, noiselevel=-1)
512 explanation = handler.get_explanation()
514 writemsg(explanation, noiselevel=-1)
517 if "--quiet" in self._frozen_config.myopts:
521 msg.append("It may be possible to solve this problem ")
522 msg.append("by using package.mask to prevent one of ")
523 msg.append("those packages from being selected. ")
524 msg.append("However, it is also possible that conflicting ")
525 msg.append("dependencies exist such that they are impossible to ")
526 msg.append("satisfy simultaneously. If such a conflict exists in ")
527 msg.append("the dependencies of two different packages, then those ")
528 msg.append("packages can not be installed simultaneously.")
# Suggest a larger --backtrack only when backtracking is off or the
# configured budget is small (< 30).
529 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
530 if not self._dynamic_config._allow_backtracking and \
531 (backtrack_opt is None or \
532 (backtrack_opt > 0 and backtrack_opt < 30)):
533 msg.append(" You may want to try a larger value of the ")
534 msg.append("--backtrack option, such as --backtrack=30, ")
535 msg.append("in order to see if that will solve this conflict ")
536 msg.append("automatically.")
538 for line in textwrap.wrap(''.join(msg), 70):
539 writemsg(line + '\n', noiselevel=-1)
540 writemsg('\n', noiselevel=-1)
543 msg.append("For more information, see MASKED PACKAGES ")
544 msg.append("section in the emerge man page or refer ")
545 msg.append("to the Gentoo Handbook.")
546 for line in textwrap.wrap(''.join(msg), 70):
547 writemsg(line + '\n', noiselevel=-1)
548 writemsg('\n', noiselevel=-1)
# NOTE(review): elided listing — original lines 551, 555, 558, 562-563,
# 565, 569, 573, 581, 583 are missing, including the docstring quotes and
# the None-guard continue/assignment pairing around line 568.
550 def _process_slot_conflicts(self):
552 Process slot conflict data to identify specific atoms which
553 lead to conflict. These atoms only match a subset of the
554 packages that have been pulled into a given slot.
556 for (slot_atom, root), slot_nodes \
557 in self._dynamic_config._slot_collision_info.items():
# First pass: union of all parent atoms across the colliding packages.
559 all_parent_atoms = set()
560 for pkg in slot_nodes:
561 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
564 all_parent_atoms.update(parent_atoms)
# Second pass: record which parent atoms match only some of the
# colliding packages — those are the conflict-causing atoms.
566 for pkg in slot_nodes:
567 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
568 if parent_atoms is None:
570 self._dynamic_config._parent_atoms[pkg] = parent_atoms
571 for parent_atom in all_parent_atoms:
572 if parent_atom in parent_atoms:
574 # Use package set for matching since it will match via
575 # PROVIDE when necessary, while match_from_list does not.
576 parent, atom = parent_atom
577 atom_set = InternalPackageSet(
578 initial_atoms=(atom,))
579 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
580 parent_atoms.add(parent_atom)
582 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): elided listing — original lines 594-595 and 599-602 are
# missing; presumably the return statements ('if flags: return flags' and
# the final 'return None') live there.
584 def _reinstall_for_flags(self, forced_flags,
585 orig_use, orig_iuse, cur_use, cur_iuse):
586 """Return a set of flags that trigger reinstallation, or None if there
587 are no such flags."""
# --newuse / --binpkg-respect-use: a flag triggers reinstall when it was
# added to or removed from IUSE (ignoring profile-forced flags), or when
# its enabled state changed between the installed and candidate package.
588 if "--newuse" in self._frozen_config.myopts or \
589 "--binpkg-respect-use" in self._frozen_config.myopts:
590 flags = set(orig_iuse.symmetric_difference(
591 cur_iuse).difference(forced_flags))
592 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
593 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only enabled-state changes count, not
# additions/removals from IUSE.
596 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
597 flags = orig_iuse.intersection(orig_use).symmetric_difference(
598 cur_iuse.intersection(cur_use))
# Drain the dependency stacks, expanding packages and deps into the graph.
# NOTE(review): elided listing — original lines 608, 613-614, 616, 619-621
# are missing, including the 'return 0/return 1' failure/success exits and
# the stack-empty branch guarding dep_stack.pop().
603 def _create_graph(self, allow_unsatisfied=False):
604 dep_stack = self._dynamic_config._dep_stack
605 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
606 while dep_stack or dep_disjunctive_stack:
607 self._spinner_update()
609 dep = dep_stack.pop()
# Package nodes expand into their dependencies; plain Dependency
# entries are resolved via _add_dep.
610 if isinstance(dep, Package):
611 if not self._add_pkg_deps(dep,
612 allow_unsatisfied=allow_unsatisfied):
615 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
617 if dep_disjunctive_stack:
618 if not self._pop_disjunction(allow_unsatisfied):
# NOTE(review): elided listing — original lines 623, 633, 636-637, 639,
# 642-644, 647-648, 651, 653-654, 660, 669, 673 are missing, including the
# docstring quotes, the yield statements and the arg_stack initialization.
622 def _expand_set_args(self, input_args, add_to_digraph=False):
624 Iterate over a list of DependencyArg instances and yield all
625 instances given in the input together with additional SetArg
626 instances that are generated from nested sets.
627 @param input_args: An iterable of DependencyArg instances
628 @type input_args: Iterable
629 @param add_to_digraph: If True then add SetArg instances
630 to the digraph, in order to record parent -> child
631 relationships from nested sets
632 @type add_to_digraph: Boolean
634 @returns: All args given in the input together with additional
635 SetArg instances that are generated from nested sets
# Guards against revisiting a set reachable through multiple parents.
638 traversed_set_args = set()
640 for arg in input_args:
641 if not isinstance(arg, SetArg):
645 root_config = arg.root_config
646 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Depth-first expansion of nested sets via an explicit stack.
649 arg = arg_stack.pop()
650 if arg in traversed_set_args:
652 traversed_set_args.add(arg)
655 # Traverse nested sets and add them to the stack
656 # if they're not already in the graph. Also, graph
657 # edges between parent and nested sets.
658 for token in arg.pset.getNonAtoms():
659 if not token.startswith(SETPREFIX):
661 s = token[len(SETPREFIX):]
662 nested_set = depgraph_sets.sets.get(s)
663 if nested_set is None:
664 nested_set = root_config.sets.get(s)
665 if nested_set is not None:
666 nested_arg = SetArg(arg=token, pset=nested_set,
667 root_config=root_config)
668 arg_stack.append(nested_arg)
670 self._dynamic_config.digraph.add(nested_arg, arg,
671 priority=BlockerDepPriority.instance)
672 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# Resolve one Dependency into the graph: blockers are registered as Blocker
# nodes; regular atoms are resolved via _select_package and handed to
# _add_pkg. Returns truthiness indicating success (failure may trigger
# backtracking via _need_restart).
# NOTE(review): elided listing — many original lines are missing from this
# view (681, 683, 688, 695-696, 700, 703, 706-707, 711, 714, 717, 724, 726,
# 729, 735, 741-743, 749, 752-754, 756-758), including the blocker branch
# condition, several return statements and the msg list initialization.
674 def _add_dep(self, dep, allow_unsatisfied=False):
675 debug = "--debug" in self._frozen_config.myopts
676 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
677 nodeps = "--nodeps" in self._frozen_config.myopts
678 empty = "empty" in self._dynamic_config.myparams
679 deep = self._dynamic_config.myparams.get("deep", 0)
# deep may be True (unbounded) or an int depth limit.
680 recurse = empty or deep is True or dep.depth <= deep
682 if not buildpkgonly and \
684 dep.parent not in self._dynamic_config._slot_collision_nodes:
685 if dep.parent.onlydeps:
686 # It's safe to ignore blockers if the
687 # parent is an --onlydeps node.
689 # The blocker applies to the root where
690 # the parent is or will be installed.
691 blocker = Blocker(atom=dep.atom,
692 eapi=dep.parent.metadata["EAPI"],
693 priority=dep.priority, root=dep.parent.root)
694 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
697 if dep.child is None:
698 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
699 onlydeps=dep.onlydeps)
701 # The caller has selected a specific package
702 # via self._minimize_packages().
704 existing_node = self._dynamic_config._slot_pkg_map[
705 dep.root].get(dep_pkg.slot_atom)
# No package satisfied the atom:
708 if dep.priority.optional:
709 # This could be an unnecessary build-time dep
710 # pulled in by --with-bdeps=y.
712 if allow_unsatisfied:
713 self._dynamic_config._unsatisfied_deps.append(dep)
715 self._dynamic_config._unsatisfied_deps_for_display.append(
716 ((dep.root, dep.atom), {"myparent":dep.parent}))
718 # The parent node should not already be in
719 # runtime_pkg_mask, since that would trigger an
720 # infinite backtracking loop.
721 if self._dynamic_config._allow_backtracking:
722 if dep.parent in self._dynamic_config._runtime_pkg_mask:
723 if "--debug" in self._frozen_config.myopts:
725 "!!! backtracking loop detected: %s %s\n" % \
727 self._dynamic_config._runtime_pkg_mask[
728 dep.parent]), noiselevel=-1)
730 # Do not backtrack if only USE have to be changed in
731 # order to satisfy the dependency.
732 dep_pkg, existing_node = \
733 self._select_package(dep.root, dep.atom.without_use,
734 onlydeps=dep.onlydeps)
# Mask the parent and request a backtracking restart.
736 self._dynamic_config._runtime_pkg_mask.setdefault(
737 dep.parent, {})["missing dependency"] = \
738 set([(dep.parent, dep.root, dep.atom)])
739 self._dynamic_config._need_restart = True
740 if "--debug" in self._frozen_config.myopts:
744 msg.append("backtracking due to unsatisfied dep:")
745 msg.append(" parent: %s" % dep.parent)
746 msg.append(" priority: %s" % dep.priority)
747 msg.append(" root: %s" % dep.root)
748 msg.append(" atom: %s" % dep.atom)
750 writemsg_level("".join("%s\n" % l for l in msg),
751 noiselevel=-1, level=logging.DEBUG)
755 if not self._add_pkg(dep_pkg, dep):
# Look up the package currently occupying pkg's slot and report whether it
# also satisfies the given atom. Returns (existing_node, matches) where
# existing_node may be None.
# NOTE(review): elided listing — original lines 761-762, 765 and 771 are
# missing; line 765 presumably completes the 'if pkg != existing_node'
# condition (e.g. an 'atom is not None' check).
759 def _check_slot_conflict(self, pkg, atom):
760 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
# Identical cpv trivially matches; otherwise consult the package set.
763 matches = pkg.cpv == existing_node.cpv
764 if pkg != existing_node and \
766 # Use package set for matching since it will match via
767 # PROVIDE when necessary, while match_from_list does not.
768 matches = bool(InternalPackageSet(initial_atoms=(atom,),
769 ).findAtomForPackage(existing_node,
770 modified_use=self._pkg_use_enabled(existing_node)))
772 return (existing_node, matches)
774 def _add_pkg(self, pkg, dep):
781 myparent = dep.parent
782 priority = dep.priority
785 priority = DepPriority()
787 Fills the digraph with nodes comprised of packages to merge.
788 mybigkey is the package spec of the package to merge.
789 myparent is the package depending on mybigkey ( or None )
790 addme = Should we add this package to the digraph or are we just looking at it's deps?
791 Think --onlydeps, we need to ignore packages in that case.
794 #IUSE-aware emerge -> USE DEP aware depgraph
795 #"no downgrade" emerge
797 # Ensure that the dependencies of the same package
798 # are never processed more than once.
799 previously_added = pkg in self._dynamic_config.digraph
801 # select the correct /var database that we'll be checking against
802 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
803 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
808 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
809 except portage.exception.InvalidDependString as e:
810 if not pkg.installed:
811 # should have been masked before it was selected
815 # NOTE: REQUIRED_USE checks are delayed until after
816 # package selection, since we want to prompt the user
817 # for USE adjustment rather than have REQUIRED_USE
818 # affect package selection and || dep choices.
819 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
820 eapi_has_required_use(pkg.metadata["EAPI"]):
821 required_use_is_sat = check_required_use(
822 pkg.metadata["REQUIRED_USE"],
823 self._pkg_use_enabled(pkg),
824 pkg.iuse.is_valid_flag)
825 if not required_use_is_sat:
826 if dep.atom is not None and dep.parent is not None:
827 self._add_parent_atom(pkg, (dep.parent, dep.atom))
830 for parent_atom in arg_atoms:
831 parent, atom = parent_atom
832 self._add_parent_atom(pkg, parent_atom)
836 atom = Atom("=" + pkg.cpv)
837 self._dynamic_config._unsatisfied_deps_for_display.append(
838 ((pkg.root, atom), {"myparent":dep.parent}))
839 self._dynamic_config._skip_restart = True
843 if not pkg.installed and \
844 "empty" not in self._dynamic_config.myparams and \
845 vardbapi.match(pkg.slot_atom):
846 # Increase the priority of dependencies on packages that
847 # are being rebuilt. This optimizes merge order so that
848 # dependencies are rebuilt/updated as soon as possible,
849 # which is needed especially when emerge is called by
850 # revdep-rebuild since dependencies may be affected by ABI
851 # breakage that has rendered them useless. Don't adjust
852 # priority here when in "empty" mode since all packages
853 # are being merged in that case.
854 priority.rebuild = True
856 existing_node, existing_node_matches = \
857 self._check_slot_conflict(pkg, dep.atom)
858 slot_collision = False
860 if existing_node_matches:
861 # The existing node can be reused.
863 for parent_atom in arg_atoms:
864 parent, atom = parent_atom
865 self._dynamic_config.digraph.add(existing_node, parent,
867 self._add_parent_atom(existing_node, parent_atom)
868 # If a direct circular dependency is not an unsatisfied
869 # buildtime dependency then drop it here since otherwise
870 # it can skew the merge order calculation in an unwanted
872 if existing_node != myparent or \
873 (priority.buildtime and not priority.satisfied):
874 self._dynamic_config.digraph.addnode(existing_node, myparent,
876 if dep.atom is not None and dep.parent is not None:
877 self._add_parent_atom(existing_node,
878 (dep.parent, dep.atom))
881 # A slot conflict has occurred.
882 # The existing node should not already be in
883 # runtime_pkg_mask, since that would trigger an
884 # infinite backtracking loop.
885 if self._dynamic_config._allow_backtracking and \
887 self._dynamic_config._runtime_pkg_mask:
888 if "--debug" in self._frozen_config.myopts:
890 "!!! backtracking loop detected: %s %s\n" % \
892 self._dynamic_config._runtime_pkg_mask[
893 existing_node]), noiselevel=-1)
894 elif self._dynamic_config._allow_backtracking and \
895 not self._accept_blocker_conflicts():
896 self._add_slot_conflict(pkg)
897 if dep.atom is not None and dep.parent is not None:
898 self._add_parent_atom(pkg, (dep.parent, dep.atom))
900 for parent_atom in arg_atoms:
901 parent, atom = parent_atom
902 self._add_parent_atom(pkg, parent_atom)
903 self._process_slot_conflicts()
908 # The ordering of backtrack_data can make
909 # a difference here, because both mask actions may lead
910 # to valid, but different, solutions and the one with
911 # 'existing_node' masked is usually the better one. Because
912 # of that, we choose an order such that
913 # the backtracker will first explore the choice with
914 # existing_node masked. The backtracker reverses the
915 # order, so the order it uses is the reverse of the
916 # order shown here. See bug #339606.
917 for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
918 # For missed update messages, find out which
919 # atoms matched to_be_selected that did not
920 # match to_be_masked.
922 self._dynamic_config._parent_atoms.get(to_be_selected, set())
924 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
926 parent_atoms = conflict_atoms
928 all_parents.update(parent_atoms)
931 for parent, atom in parent_atoms:
932 i = InternalPackageSet(initial_atoms=(atom,))
933 if not i.findAtomForPackage(to_be_masked):
937 if to_be_selected >= to_be_masked:
938 # We only care about the parent atoms
939 # when they trigger a downgrade.
942 fallback_data.append((to_be_masked, parent_atoms))
945 # 'to_be_masked' does not violate any parent atom, which means
946 # there is no point in masking it.
949 backtrack_data.append((to_be_masked, parent_atoms))
951 if not backtrack_data:
952 # This shouldn't happen, but fall back to the old
953 # behavior if this gets triggered somehow.
954 backtrack_data = fallback_data
956 if len(backtrack_data) > 1:
957 # NOTE: Generally, we prefer to mask the higher
958 # version since this solves common cases in which a
959 # lower version is needed so that all dependencies
960 # will be satisfied (bug #337178). However, if
961 # existing_node happens to be installed then we
962 # mask that since this is a common case that is
963 # triggered when --update is not enabled.
964 if existing_node.installed:
966 elif pkg > existing_node:
967 backtrack_data.reverse()
969 to_be_masked, parent_atoms = backtrack_data[-1]
971 self._dynamic_config._runtime_pkg_mask.setdefault(
972 to_be_masked, {})["slot conflict"] = parent_atoms
973 self._dynamic_config._need_restart = True
974 if "--debug" in self._frozen_config.myopts:
978 msg.append("backtracking due to slot conflict:")
979 if backtrack_data is fallback_data:
980 msg.append("!!! backtrack_data fallback")
981 msg.append(" first package: %s" % existing_node)
982 msg.append(" second package: %s" % pkg)
983 msg.append(" package to mask: %s" % to_be_masked)
984 msg.append(" slot: %s" % pkg.slot_atom)
985 msg.append(" parents: %s" % ", ".join( \
986 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
988 writemsg_level("".join("%s\n" % l for l in msg),
989 noiselevel=-1, level=logging.DEBUG)
992 # A slot collision has occurred. Sometimes this coincides
993 # with unresolvable blockers, so the slot collision will be
994 # shown later if there are no unresolvable blockers.
995 self._add_slot_conflict(pkg)
996 slot_collision = True
999 # Now add this node to the graph so that self.display()
1000 # can show use flags and --tree portage.output. This node is
1001 # only being partially added to the graph. It must not be
1002 # allowed to interfere with the other nodes that have been
1003 # added. Do not overwrite data for existing nodes in
1004 # self._dynamic_config.mydbapi since that data will be used for blocker
1006 # Even though the graph is now invalid, continue to process
1007 # dependencies so that things like --fetchonly can still
1008 # function despite collisions.
1010 elif not previously_added:
1011 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1012 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1013 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1014 self._dynamic_config._highest_pkg_cache.clear()
1015 self._check_masks(pkg)
1017 if not pkg.installed:
1018 # Allow this package to satisfy old-style virtuals in case it
1019 # doesn't already. Any pre-existing providers will be preferred
1022 pkgsettings.setinst(pkg.cpv, pkg.metadata)
1023 # For consistency, also update the global virtuals.
1024 settings = self._frozen_config.roots[pkg.root].settings
1026 settings.setinst(pkg.cpv, pkg.metadata)
1028 except portage.exception.InvalidDependString as e:
1029 if not pkg.installed:
1030 # should have been masked before it was selected
1034 self._dynamic_config._set_nodes.add(pkg)
1036 # Do this even when addme is False (--onlydeps) so that the
1037 # parent/child relationship is always known in case
1038 # self._show_slot_collision_notice() needs to be called later.
1039 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1040 if dep.atom is not None and dep.parent is not None:
1041 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1044 for parent_atom in arg_atoms:
1045 parent, atom = parent_atom
1046 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1047 self._add_parent_atom(pkg, parent_atom)
1049 """ This section determines whether we go deeper into dependencies or not.
1050 We want to go deeper on a few occasions:
1051 Installing package A, we need to make sure package A's deps are met.
1052 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1053 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1058 deep = self._dynamic_config.myparams.get("deep", 0)
1059 empty = "empty" in self._dynamic_config.myparams
1060 recurse = empty or deep is True or depth + 1 <= deep
1061 dep_stack = self._dynamic_config._dep_stack
1062 if "recurse" not in self._dynamic_config.myparams:
1064 elif pkg.installed and not recurse:
1065 dep_stack = self._dynamic_config._ignored_deps
1067 self._spinner_update()
1069 if not previously_added:
1070 dep_stack.append(pkg)
1073 def _check_masks(self, pkg):
1075 slot_key = (pkg.root, pkg.slot_atom)
1077 # Check for upgrades in the same slot that are
1078 # masked due to a LICENSE change in a newer
1079 # version that is not masked for any other reason.
1080 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1081 if other_pkg is not None and pkg < other_pkg:
1082 self._dynamic_config._masked_license_updates.add(other_pkg)
1084 def _add_parent_atom(self, pkg, parent_atom):
1085 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1086 if parent_atoms is None:
1087 parent_atoms = set()
1088 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1089 parent_atoms.add(parent_atom)
1091 def _add_slot_conflict(self, pkg):
1092 self._dynamic_config._slot_collision_nodes.add(pkg)
1093 slot_key = (pkg.slot_atom, pkg.root)
1094 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1095 if slot_nodes is None:
1097 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1098 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Collect pkg's DEPEND/RDEPEND/PDEPEND strings, adjust them for the
    # active options (--buildpkgonly, --with-bdeps, --root-deps), and
    # feed each adjusted string to _add_pkg_dep_string() for resolution.
    # NOTE(review): several lines of this method are missing from this
    # copy (the loop that fills `edepend`, the `deps = (...)` tuple
    # wrapper, and the try/except framing); annotated inline below.

    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = self._pkg_use_enabled(pkg)
    depth = pkg.depth + 1
    removal_action = "remove" in self._dynamic_config.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # NOTE(review): the `edepend = {}` / `for k in depkeys:` header that
    # this assignment belongs to is not visible in this copy.
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams and \
        "empty" not in self._dynamic_config.myparams:
        # --buildpkgonly without deep/empty: run-time deps of a package
        # that is only built (not merged) are irrelevant.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    if pkg.built and not removal_action:
        if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # NOTE(review): the tail of the comment above and the `else:`
            # that the next two lines belong to are missing in this copy.
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    depend_root = myroot
    # --root-deps decides whether build-time deps are resolved for ROOT.
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""

    # NOTE(review): the `deps = (` wrapper around the following three
    # (root, dep_string, priority) entries is missing in this copy.
    (depend_root, edepend["DEPEND"],
        self._priority(buildtime=(not pkg.built),
        optional=pkg.built),
    (myroot, edepend["RDEPEND"],
        self._priority(runtime=True),
    (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True),

    debug = "--debug" in self._frozen_config.myopts
    strict = mytype != "installed"

    for dep_root, dep_string, dep_priority, ignore_blockers in deps:
        # Debug trace of the raw dependency string being processed.
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # Evaluate USE conditionals; invalid flags are fatal only for
        # packages that are not already installed.
        dep_string = portage.dep.use_reduce(dep_string,
            uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
                # Try again, but omit the is_valid_flag argument, since
                # invalid USE conditionals are a common problem and it's
                # practical to ignore this issue for installed packages.
                dep_string = portage.dep.use_reduce(dep_string,
                    uselist=self._pkg_use_enabled(pkg))
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

        # Defer || groups and old-style virtuals; keep plain deps.
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)
            # should have been masked before it was selected

        # Re-serialize the reduced structure for _add_pkg_dep_string.
        dep_string = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True)

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied, ignore_blockers=ignore_blockers):

    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied, ignore_blockers=False):
    # Resolve one dependency string of pkg: select atoms for it, create
    # a Dependency object for each (atom, child) pair, and also graph
    # any indirect virtual deps that _select_atoms pulled in.
    # NOTE(review): try/except framing, `if debug:` guards, and several
    # continue/return lines are missing from this copy of the method.
    depth = pkg.depth + 1
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"

    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)

    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString as e:
        self._dynamic_config._masked_installed.add(pkg)
        # should have been masked before it was selected

    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi

    # Direct deps of pkg, minimized so redundant atoms share one child.
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        if ignore_blockers and atom.blocker:
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.

        mypriority = dep_priority.copy()
        if not atom.blocker:
            inst_pkgs = vardb.match_pkgs(atom)
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    # Prefer a visible installed match as "satisfied by".
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]

        if not self._add_dep(Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root),
            allow_unsatisfied=allow_unsatisfied):

    # pkg's own entry has been consumed; what remains are virtuals.
    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_pkg, atoms in selected_atoms.items():

        writemsg_level("Candidates: %s: %s\n" % \
            (virt_pkg.cpv, [str(x) for x in atoms]),
            noiselevel=-1, level=logging.DEBUG)

        # Just assume depth + 1 here for now, though it's not entirely
        # accurate since multilple levels of indirect virtual deps may
        # have been traversed. The _add_pkg call will reset the depth to
        # 0 if this package happens to match an argument.
        if not self._add_pkg(virt_pkg,
            Dependency(atom=Atom('=' + virt_pkg.cpv),
            depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):
            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = vardb.match_pkgs(atom)
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_pkg.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):

    writemsg_level("Exiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    """
    # NOTE(review): the docstring tail and several setup lines
    # (atom_pkg_map / pkg_atom_map / cp_pkg_map initialization, loop
    # headers, sorts and yields) are missing from this copy.
    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg

    if len(atom_pkg_map) < 2:
        # Nothing to minimize: a single (atom, pkg) pair is yielded.
        for item in atom_pkg_map.items():

    # Group atoms per selected package and packages per cp.
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for cp, pkgs in cp_pkg_map.items():
        for atom in pkg_atom_map[pkg]:

        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
        for atom in pkg_atom_map[pkg1]:
            atom_pkg_graph.add(pkg1, atom)
            atom_set = InternalPackageSet(initial_atoms=(atom,))
            if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
                atom_pkg_graph.add(pkg2, atom)

        # A pkg is removable when every atom it satisfies is also
        # satisfied by some other selected pkg.
        eliminate_pkg = True
        for atom in atom_pkg_graph.parent_nodes(pkg):
            if len(atom_pkg_graph.child_nodes(atom)) < 2:
                eliminate_pkg = False
        atom_pkg_graph.remove(pkg)

        # Yield ~, =*, < and <= atoms first, since those are more likely to
        # cause slot conflicts, and we want those atoms to be displayed
        # in the resulting slot conflict message (see bug #291142).
        for atom in cp_atoms:
            for child_pkg in atom_pkg_graph.child_nodes(atom):
                existing_node, matches = \
                    self._check_slot_conflict(child_pkg, atom)
                if existing_node and not matches:
                    conflict_atoms.append(atom)
            normal_atoms.append(atom)

        for atom in chain(conflict_atoms, normal_atoms):
            child_pkgs = atom_pkg_graph.child_nodes(atom)
            # if more than one child, yield highest version
            if len(child_pkgs) > 1:
            yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    """
    # NOTE(review): the docstring tail, the loop counter setup
    # (`i = 0`, `x = dep_struct[i]`), and several yield / `||` branch
    # lines are missing from this copy.
    while i < len(dep_struct):
        if isinstance(x, list):
            # Nested dependency group: recurse, re-yielding its
            # plain (non-disjunctive) members.
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
            # || operator: queue it together with its choice list.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ x, dep_struct[ i + 1 ] ] )
            x = portage.dep.Atom(x)
        except portage.exception.InvalidAtom:
            # Invalid atoms are fatal only for non-installed packages.
            if not pkg.installed:
                raise portage.exception.InvalidDependString(
                    "invalid atom: '%s'" % x)
        # Note: Eventually this will check for PROPERTIES=virtual
        # or whatever other metadata gets implemented for this
        if x.cp.startswith('virtual/'):
            # Old-style virtual: treat it as a disjunction to be
            # expanded later.
            self._queue_disjunction( pkg, dep_root,
                dep_priority, [ str(x) ] )
1461 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1462 self._dynamic_config._dep_disjunctive_stack.append(
1463 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    # Re-serialize the structure so _add_pkg_dep_string can parse it.
    dep_string = portage.dep.paren_enclose(dep_struct,
        unevaluated_atom=True)
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
    # NOTE(review): the failure/success return lines of this method are
    # missing from this copy.
def _priority(self, **kwargs):
    """Build a dependency priority object for the current graph mode.

    In removal mode ("remove" in _dynamic_config.myparams) dependencies
    are prioritized with UnmergeDepPriority; otherwise the regular
    DepPriority is used. All keyword arguments are forwarded to the
    chosen constructor.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # The `else:` was missing in the transmitted copy, which would
        # have made this assignment unconditional and always selected
        # DepPriority.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # Probe every configured db for categories containing a package
    # named atom_pn, then rebuild fully-qualified atoms from the hits.
    # NOTE(review): the `categories` set construction, the `deps` list
    # initialization, a `break`, and the final return are missing from
    # this copy.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):

    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat)))
1512 def _have_new_virt(self, root, atom_cp):
1514 for db, pkg_type, built, installed, db_keys in \
1515 self._dynamic_config._filtered_trees[root]["dbs"]:
1516 if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Yield (arg, atom) pairs for every argument atom matching pkg,
    # skipping atoms better satisfied by a new-style virtual or by a
    # visible higher version in a different slot.
    # NOTE(review): several continue/yield lines and the
    # `visible_pkgs =` / `higher_slot = None` assignments are missing
    # from this copy.
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    root_config = self._frozen_config.roots[pkg.root]
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A visible higher version lives in another slot.
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): numerous lines are missing from this copy (the
    # `for x in myfiles:` loop header, `args`/`myfavorites`/
    # `lookup_owners` initialization, several branch headers and
    # returns); only the surviving lines are annotated below.
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    myroot = self._frozen_config.target_root
    dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
    vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self._frozen_config.myopts

    # Classify each command-line argument: binary package, ebuild path,
    # owned file path, package set, or plain atom.
    ext = os.path.splitext(x)[1]

    # .tbz2 arguments: locate the binary package under PKGDIR.
    if not os.path.exists(x):
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
    writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
    return 0, myfavorites
    # Derive the cpv key from the tbz2's embedded CATEGORY metadata.
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
        writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
        self._dynamic_config._skip_restart = True
        return 0, myfavorites
    pkg = self._pkg(mykey, "binary", root_config,
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))

    elif ext==".ebuild":
        # .ebuild arguments: validate the path against the tree layout.
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
            self._dynamic_config._skip_restart = True
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.cpv_getkey(mykey)):
            # Warn (with a delay) when directly emerging a masked ebuild.
            writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
            countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        pkg = self._pkg(mykey, "ebuild", root_config,
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))

    elif x.startswith(os.path.sep):
        # Absolute path: resolve via installed-file ownership lookup.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
            self._dynamic_config._skip_restart = True
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    elif x.startswith("." + os.sep) or \
        x.startswith(".." + os.sep):
        f = os.path.abspath(x)
        if not f.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
                " $ROOT.\n") % (f, x), noiselevel=-1)
            self._dynamic_config._skip_restart = True
        lookup_owners.append(f)

    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        # @set argument: look it up and register it as a SetArg.
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        if s in depgraph_sets.sets:
        depgraph_sets.sets[s] = pset
        args.append(SetArg(arg=x, pset=pset,
            root_config=root_config))

    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
        self._dynamic_config._skip_restart = True

    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=Atom(x),
        root_config=root_config))

    # Category-less atom: expand it against all known categories.
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        if vardb.cp_list(atom.cp):
            installed_cp_set.add(atom.cp)

    if len(installed_cp_set) > 1:
        non_virtual_cps = set()
        for atom_cp in installed_cp_set:
            if not atom_cp.startswith("virtual/"):
                non_virtual_cps.add(atom_cp)
        if len(non_virtual_cps) == 1:
            installed_cp_set = non_virtual_cps

    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        # Prefer the expansion matching the installed package.
        installed_cp = next(iter(installed_cp_set))
        for atom in expanded_atoms:
            if atom.cp == installed_cp:
        for pkg in self._iter_match_pkgs_any(
            root_config, atom.without_use,
            if not pkg.installed:
        expanded_atoms = [atom]

    # If a non-virtual package and one or more virtual packages
    # are in expanded_atoms, use the non-virtual package.
    if len(expanded_atoms) > 1:
        number_of_virtuals = 0
        for expanded_atom in expanded_atoms:
            if expanded_atom.cp.startswith("virtual/"):
                number_of_virtuals += 1
            candidate = expanded_atom
        if len(expanded_atoms) - number_of_virtuals == 1:
            expanded_atoms = [ candidate ]

    if len(expanded_atoms) > 1:
        # Still ambiguous: report and bail out.
        writemsg("\n\n", noiselevel=-1)
        ambiguous_package_name(x, expanded_atoms, root_config,
            self._frozen_config.spinner, self._frozen_config.myopts)
        self._dynamic_config._skip_restart = True
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = Atom(insert_category_into_atom(x, "null"))
    cat, atom_pn = portage.catsplit(null_atom.cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = Atom(null_atom.replace('null/', 'virtual/', 1))
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))

    # Resolve queued file paths to their owning packages.
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot)-1:])
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
    if not search_for_multiple:
        portage.writemsg(("\n\n!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0], noiselevel=-1)
        self._dynamic_config._skip_restart = True
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = Atom(portage.cpv_getkey(cpv))
    atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))

    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #   1) Initialize args
        #   2) Select packages and generate initial greedy atoms
        #   3) Update args with greedy atoms
        #   4) Select packages and generate greedy atoms again, while
        #      accounting for any blockers between selected packages
        #   5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))

        self._set_args(greedy_args)
        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Deduplicate favorites while recording atoms and set names.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]

    return self._resolve(myfavorites)
def _resolve(self, myfavorites):
    """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
    call self._creategraph to process theier deps and return
    """
    # NOTE(review): the docstring tail, the try/except framing, and
    # several continue/return lines are missing from this copy.
    debug = "--debug" in self._frozen_config.myopts
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    myroot = self._frozen_config.target_root
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    pprovideddict = pkgsettings.pprovideddict
    virtuals = pkgsettings.getvirtuals()
    for arg in self._expand_set_args(
        self._dynamic_config._initial_arg_list,
        add_to_digraph=True):
        for atom in arg.pset.getAtoms():
            self._spinner_update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            pprovided = pprovideddict.get(atom.cp)
            if pprovided and portage.match_from_list(atom, pprovided):
                # A provided package has been specified on the command line.
                self._dynamic_config._pprovided_args.append((arg, atom))
            if isinstance(arg, PackageArg):
                # Explicit package argument (tbz2/ebuild): add directly.
                if not self._add_pkg(arg.package, dep) or \
                    not self._create_graph():
                    if not self.need_restart():
                        sys.stderr.write(("\n\n!!! Problem " + \
                            "resolving dependencies for %s\n") % \
                    return 0, myfavorites

            portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                (arg, atom), noiselevel=-1)
            pkg, existing_node = self._select_package(
                myroot, atom, onlydeps=onlydeps)
            # No selectable package: check whether an expanded virtual
            # of this atom is covered by package.provided.
            pprovided_match = False
            for virt_choice in virtuals.get(atom.cp, []):
                expanded_atom = portage.dep.Atom(
                    atom.replace(atom.cp, virt_choice.cp, 1))
                pprovided = pprovideddict.get(expanded_atom.cp)
                portage.match_from_list(expanded_atom, pprovided):
                    # A provided package has been
                    # specified on the command line.
                    self._dynamic_config._pprovided_args.append((arg, atom))
                    pprovided_match = True

            if not (isinstance(arg, SetArg) and \
                arg.name in ("selected", "system", "world")):
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {"myparent" : arg}))
                return 0, myfavorites
            self._dynamic_config._missing_args.append((arg, atom))

            if atom.cp != pkg.cp:
                # For old-style virtuals, we need to repeat the
                # package.provided check against the selected package.
                expanded_atom = atom.replace(atom.cp, pkg.cp)
                pprovided = pprovideddict.get(pkg.cp)
                portage.match_from_list(expanded_atom, pprovided):
                    # A provided package has been
                    # specified on the command line.
                    self._dynamic_config._pprovided_args.append((arg, atom))

            if pkg.installed and "selective" not in self._dynamic_config.myparams:
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {"myparent" : arg}))
                # Previous behavior was to bail out in this case, but
                # since the dep is satisfied by the installed package,
                # it's more friendly to continue building the graph
                # and just show a warning message. Therefore, only bail
                # out here if the atom is not from either the system or
                if not (isinstance(arg, SetArg) and \
                    arg.name in ("selected", "system", "world")):
                    return 0, myfavorites

            # Add the selected package to the graph as soon as possible
            # so that later dep_check() calls can use it as feedback
            # for making more consistent atom selections.
            if not self._add_pkg(pkg, dep):
                if self.need_restart():
                elif isinstance(arg, SetArg):
                    writemsg(("\n\n!!! Problem resolving " + \
                        "dependencies for %s from %s\n") % \
                        (atom, arg.arg), noiselevel=-1)
                    writemsg(("\n\n!!! Problem resolving " + \
                        "dependencies for %s\n") % \
                        (atom,), noiselevel=-1)
                    return 0, myfavorites

        except SystemExit as e:
            raise # Needed else can't exit
        except Exception as e:
            writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
            writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    except self._unknown_internal_error:
        return False, myfavorites

    if set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_unstable_keywords) or \
        set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_use_config_changes) or \
        set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_license_changes) :
        #We failed if the user needs to change the configuration
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (True, myfavorites)
1971 def _set_args(self, args):
# Purpose: rebuild the per-root argument-atom bookkeeping ("__non_set_args__"
# set, depgraph_sets.atoms, atom_arg_map) from the given argument list.
# Safe to call repeatedly: all previous argument state is cleared first and
# the package selection cache is invalidated at the end.
# NOTE(review): this chunk is a numbered paste with gaps — original lines
# 1972/1977-1980 (docstring quotes and the set_atoms/non_set_atoms dict
# initialization) are missing here; verify against the pristine file.
1973 Create the "__non_set_args__" package set from atoms and packages given as
1974 arguments. This method can be called multiple times if necessary.
1975 The package selection cache is automatically invalidated, since
1976 arguments influence package selections.
# Clear state left over from any previous call, for every configured root.
1981 for root in self._dynamic_config.sets:
1982 depgraph_sets = self._dynamic_config.sets[root]
1983 depgraph_sets.sets.setdefault('__non_set_args__',
1984 InternalPackageSet()).clear()
1985 depgraph_sets.atoms.clear()
1986 depgraph_sets.atom_arg_map.clear()
1987 set_atoms[root] = []
1988 non_set_atoms[root] = []
1990 # We don't add set args to the digraph here since that
1991 # happens at a later stage and we don't want to make
1992 # any state changes here that aren't reversed by a
1993 # another call to this method.
1994 for arg in self._expand_set_args(args, add_to_digraph=False):
1995 atom_arg_map = self._dynamic_config.sets[
1996 arg.root_config.root].atom_arg_map
# Atoms coming from a SetArg are tracked separately from plain
# atom/package arguments.
1997 if isinstance(arg, SetArg):
1998 atom_group = set_atoms[arg.root_config.root]
2000 atom_group = non_set_atoms[arg.root_config.root]
2002 for atom in arg.pset.getAtoms():
2003 atom_group.append(atom)
2004 atom_key = (atom, arg.root_config.root)
# atom_arg_map maps (atom, root) -> list of args that pulled the atom in.
# NOTE(review): lines 2006-2007 (refs is None / refs.append handling)
# are missing from this paste.
2005 refs = atom_arg_map.get(atom_key)
2008 atom_arg_map[atom_key] = refs
# Publish the collected atoms into the per-root depgraph sets.
2012 for root in self._dynamic_config.sets:
2013 depgraph_sets = self._dynamic_config.sets[root]
2014 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2015 non_set_atoms.get(root, [])))
2016 depgraph_sets.sets['__non_set_args__'].update(
2017 non_set_atoms.get(root, []))
2019 # Invalidate the package selection cache, since
2020 # arguments influence package selections.
2021 self._dynamic_config._highest_pkg_cache.clear()
2022 for trees in self._dynamic_config._filtered_trees.values():
2023 trees["porttree"].dbapi._clear_cache()
2025 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# Purpose: for a greedy slot upgrade, compute which additional installed
# slots of the atom's package should also be pulled in (as slot atoms),
# optionally discarding those that would blocker-conflict with each other
# or with the highest visible match.
# NOTE(review): numbered paste with gaps — e.g. the early `return []`
# bodies (2035, 2055-2056), `slots = set()` (2037), and the try/blocker
# bookkeeping initializers are not visible here.
2027 Return a list of slot atoms corresponding to installed slots that
2028 differ from the slot of the highest visible match. When
2029 blocker_lookahead is True, slot atoms that would trigger a blocker
2030 conflict are automatically discarded, potentially allowing automatic
2031 uninstallation of older slots when appropriate.
2033 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2034 if highest_pkg is None:
2036 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOT values of installed instances that share the cp of the
# highest visible match.
2038 for cpv in vardb.match(atom):
2039 # don't mix new virtuals with old virtuals
2040 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2041 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2043 slots.add(highest_pkg.metadata["SLOT"])
2047 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep only packages with the same cp and a
# version lower than the highest match.
2050 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2051 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2052 if pkg is not None and \
2053 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2054 greedy_pkgs.append(pkg)
2057 if not blocker_lookahead:
2058 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: parse each candidate's dep strings and record its
# blocker atoms, so mutually conflicting slots can be filtered out.
2061 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2062 for pkg in greedy_pkgs + [highest_pkg]:
2063 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2065 selected_atoms = self._select_atoms(
2066 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2067 parent=pkg, strict=True)
2068 except portage.exception.InvalidDependString:
2071 for atoms in selected_atoms.values():
2072 blocker_atoms.extend(x for x in atoms if x.blocker)
2073 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2075 if highest_pkg not in blockers:
2078 # filter packages with invalid deps
2079 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2081 # filter packages that conflict with highest_pkg
2082 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2083 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2084 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2089 # If two packages conflict, discard the lower version.
2090 discard_pkgs = set()
2091 greedy_pkgs.sort(reverse=True)
2092 for i in range(len(greedy_pkgs) - 1):
2093 pkg1 = greedy_pkgs[i]
2094 if pkg1 in discard_pkgs:
2096 for j in range(i + 1, len(greedy_pkgs)):
2097 pkg2 = greedy_pkgs[j]
2098 if pkg2 in discard_pkgs:
2100 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2101 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2103 discard_pkgs.add(pkg2)
2105 return [pkg.slot_atom for pkg in greedy_pkgs \
2106 if pkg not in discard_pkgs]
2108 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Thin wrapper: forces the atom selection to use the graph-backed trees
# (self._dynamic_config._graph_trees) so dep_check prefers packages that
# are already in the graph or installed, then delegates everything else
# to _select_atoms_highest_available.
# NOTE(review): the docstring's triple-quote delimiter lines (2109/2113)
# are missing from this numbered paste.
2110 Prefer atoms matching packages that have already been
2111 added to the graph or those that are installed and have
2112 not been scheduled for replacement.
2114 kwargs["trees"] = self._dynamic_config._graph_trees
2115 return self._select_atoms_highest_available(*pargs, **kwargs)
2117 def _select_atoms_highest_available(self, root, depstring,
2118 myuse=None, parent=None, strict=True, trees=None, priority=None):
2119 """This will raise InvalidDependString if necessary. If trees is
2120 None then self._dynamic_config._filtered_trees is used."""
# Purpose: run portage.dep_check() over depstring and, when a parent is
# given, re-associate the chosen atoms with the packages that satisfy
# them by walking the atom_graph side channel that dep_check populates.
# Returns either a flat atom list (no parent) or {pkg: [atoms]}.
# NOTE(review): numbered paste with gaps — the `if trees is None:` guard
# (2123), the try/finally framing around dep_check (2133/2142), and the
# `if not mycheck[0]:` / `if parent is None:` branches (2150/2152/2154)
# are missing; the finally-style cleanup below restores state that the
# try presumably set up.
2122 pkgsettings = self._frozen_config.pkgsettings[root]
2124 trees = self._dynamic_config._filtered_trees
2125 mytrees = trees[root]
2126 atom_graph = digraph()
2128 # Temporarily disable autounmask so that || preferences
2129 # account for masking and USE settings.
2130 _autounmask_backup = self._dynamic_config._autounmask
2131 self._dynamic_config._autounmask = False
2132 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2134 if parent is not None:
2135 trees[root]["parent"] = parent
2136 trees[root]["atom_graph"] = atom_graph
2137 if priority is not None:
2138 trees[root]["priority"] = priority
2139 mycheck = portage.dep_check(depstring, None,
2140 pkgsettings, myuse=myuse,
2141 myroot=root, trees=trees)
# Cleanup: restore autounmask and remove the temporary keys that were
# injected into the trees dict for dep_check's benefit.
2143 self._dynamic_config._autounmask = _autounmask_backup
2144 del mytrees["pkg_use_enabled"]
2145 if parent is not None:
2146 trees[root].pop("parent")
2147 trees[root].pop("atom_graph")
2148 if priority is not None:
2149 trees[root].pop("priority")
2151 raise portage.exception.InvalidDependString(mycheck[1])
2153 selected_atoms = mycheck[1]
# With a parent: map each package node in atom_graph to the chosen atoms
# it satisfies, skipping virtual matches whose USE deps are violated.
2155 chosen_atoms = frozenset(mycheck[1])
2156 selected_atoms = {parent : []}
2157 for node in atom_graph:
2158 if isinstance(node, Atom):
2163 pkg, virt_atom = node
2164 if virt_atom not in chosen_atoms:
2166 if not portage.match_from_list(virt_atom, [pkg]):
2167 # Typically this means that the atom
2168 # specifies USE deps that are unsatisfied
2169 # by the selected package. The caller will
2170 # record this as an unsatisfied dependency
2174 selected_atoms[pkg] = [atom for atom in \
2175 atom_graph.child_nodes(node) if atom in chosen_atoms]
2177 return selected_atoms
2179 def _get_dep_chain(self, start_node, target_atom=None,
2180 unsatisfied_dependency=False):
# Purpose: walk the digraph upward from start_node to an argument (root)
# node, building a human-readable chain of (pkg_name, node_type) pairs
# used by error/suggestion output. USE flags that affect the relevant
# dep string are annotated in brackets on each package name.
# NOTE(review): numbered paste with gaps — docstring delimiters
# (2181/2188), `dep_chain = []` / `node = start_node` style initializers
# (2190-2192), `usedep = []` lines, `child = node` bookkeeping, several
# `continue`/`break` lines and the final `return dep_chain` are missing.
2182 Returns a list of (atom, node_type) pairs that represent a dep chain.
2183 If target_atom is None, the first package shown is pkg's parent.
2184 If target_atom is not None the first package shown is pkg.
2185 If unsatisfied_dependency is True, the first parent is select who's
2186 dependency is not satisfied by 'pkg'. This is need for USE changes.
2187 (Does not support target_atom.)
2189 traversed_nodes = set()
2193 all_parents = self._dynamic_config._parent_atoms
# When a target atom is given, the start node itself is shown first,
# annotated with the USE flags that affect matching of target_atom.
2195 if target_atom is not None and isinstance(node, Package):
2196 affecting_use = set()
2197 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2198 affecting_use.update(extract_affecting_use(
2199 node.metadata[dep_str], target_atom))
2200 affecting_use.difference_update(node.use.mask, node.use.force)
2201 pkg_name = _unicode_decode("%s") % (node.cpv,)
2204 for flag in affecting_use:
2205 if flag in self._pkg_use_enabled(node):
2208 usedep.append("-"+flag)
2209 pkg_name += "[%s]" % ",".join(usedep)
2211 dep_chain.append((pkg_name, node.type_name))
# Walk toward the root, appending one entry per traversed node.
2213 while node is not None:
2214 traversed_nodes.add(node)
2216 if isinstance(node, DependencyArg):
2217 if self._dynamic_config.digraph.parent_nodes(node):
2220 node_type = "argument"
2221 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
# For intermediate packages, work out which dep string (DEPEND/RDEPEND/
# PDEPEND) pulled in the child, to compute the affecting USE flags.
2223 elif node is not start_node:
2224 for ppkg, patom in all_parents[child]:
2226 atom = patom.unevaluated_atom
2230 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
2231 if priority.buildtime:
2232 dep_strings.add(node.metadata["DEPEND"])
2233 if priority.runtime:
2234 dep_strings.add(node.metadata["RDEPEND"])
2235 if priority.runtime_post:
2236 dep_strings.add(node.metadata["PDEPEND"])
2238 affecting_use = set()
2239 for dep_str in dep_strings:
2240 affecting_use.update(extract_affecting_use(dep_str, atom))
2242 #Don't show flags as 'affecting' if the user can't change them,
2243 affecting_use.difference_update(node.use.mask, \
2246 pkg_name = _unicode_decode("%s") % (node.cpv,)
2249 for flag in affecting_use:
2250 if flag in self._pkg_use_enabled(node):
2253 usedep.append("-"+flag)
2254 pkg_name += "[%s]" % ",".join(usedep)
2256 dep_chain.append((pkg_name, node.type_name))
2258 if node not in self._dynamic_config.digraph:
2259 # The parent is not in the graph due to backtracking.
2262 # When traversing to parents, prefer arguments over packages
2263 # since arguments are root nodes. Never traverse the same
2264 # package twice, in order to prevent an infinite loop.
2265 selected_parent = None
2266 for parent in self._dynamic_config.digraph.parent_nodes(node):
2267 if parent in traversed_nodes:
2269 if isinstance(parent, DependencyArg):
2270 if self._dynamic_config.digraph.parent_nodes(parent):
2271 selected_parent = parent
2275 (_unicode_decode("%s") % (parent,), "argument"))
2276 selected_parent = None
# unsatisfied_dependency mode: pick a parent whose dependency atom is
# NOT satisfied by start_node (needed for USE-change suggestions).
2279 if unsatisfied_dependency and node is start_node:
2280 # Make sure that pkg doesn't satisfy parent's dependency.
2281 # This ensures that we select the correct parent for use
2283 for ppkg, atom in all_parents[start_node]:
2285 atom_set = InternalPackageSet(initial_atoms=(atom,))
2286 if not atom_set.findAtomForPackage(start_node):
2287 selected_parent = parent
2291 selected_parent = parent
2293 node = selected_parent
2296 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
# Purpose: render the dep chain for pkg as a single '#'-prefixed comment
# line ("required by ..."), suitable for writing into config files such
# as package.use/package.keywords suggestions.
# NOTE(review): numbered paste with gaps — the `display_list = []`
# initializer (2298), the `else:` before the plain append (2302), and
# the trailing `return msg` (around 2306-2308) are missing here.
2297 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2299 for node, node_type in dep_chain:
2300 if node_type == "argument":
2301 display_list.append("required by %s (argument)" % node)
2303 display_list.append("required by %s" % node)
2305 msg = "#" + ", ".join(display_list) + "\n"
2309 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2310 check_backtrack=False):
# Purpose: diagnose and display why `atom` could not be satisfied for
# `root`. Scans every configured dbapi for near-matches, classifies them
# (masked, USE-flag mismatch, missing IUSE, unsatisfied REQUIRED_USE,
# backtracking mask), then prints the most helpful message — including
# USE-change suggestions, mask reasons, similar-name suggestions via
# difflib, and the dependency chain that requested the atom.
# NOTE(review): this is a numbered paste with many missing interior
# lines (try/except headers, `else:` branches, initializers such as
# `missing_use = []`, `masked_packages`' companions, `msg = []`, and
# several `continue` lines). Comments below describe only what the
# visible lines establish.
2312 When check_backtrack=True, no output is produced and
2313 the method either returns or raises _backtrack_mask if
2314 a matching package has been masked by backtracking.
2316 backtrack_mask = False
# atom_set matches ignoring USE deps; used to filter false old-style
# virtual matches below.
2317 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,))
2318 xinfo = '"%s"' % atom.unevaluated_atom
2321 if isinstance(myparent, AtomArg):
2322 xinfo = _unicode_decode('"%s"') % (myparent,)
2323 # Discard null/ from failed cpv_expand category expansion.
2324 xinfo = xinfo.replace("null/", "")
2326 xinfo = "%s for %s" % (xinfo, root)
2327 masked_packages = []
2329 missing_use_adjustable = set()
2330 required_use_unsatisfied = []
2331 masked_pkg_instances = set()
2332 missing_licenses = []
2333 have_eapi_mask = False
2334 pkgsettings = self._frozen_config.pkgsettings[root]
2335 root_config = self._frozen_config.roots[root]
2336 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2337 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2338 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2339 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Pass 1: collect every candidate cpv (ignoring USE deps) from each db
# and classify its masking/USE status.
2340 for db, pkg_type, built, installed, db_keys in dbs:
2344 if hasattr(db, "xmatch"):
2345 cpv_list = db.xmatch("match-all", atom.without_use)
2347 cpv_list = db.match(atom.without_use)
2350 for cpv in cpv_list:
2351 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, \
2352 pkg_type, built, installed, db_keys, _pkg_use_enabled=self._pkg_use_enabled)
2354 if metadata is not None:
2355 pkg = self._pkg(cpv, pkg_type, root_config,
2356 installed=installed)
2357 # pkg.metadata contains calculated USE for ebuilds,
2358 # required later for getMissingLicenses.
2359 metadata = pkg.metadata
2360 if pkg.cp != atom.cp:
2361 # A cpv can be returned from dbapi.match() as an
2362 # old-style virtual match even in cases when the
2363 # package does not actually PROVIDE the virtual.
2364 # Filter out any such false matches here.
2365 if not atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
2367 if pkg in self._dynamic_config._runtime_pkg_mask:
2368 backtrack_reasons = \
2369 self._dynamic_config._runtime_pkg_mask[pkg]
2370 mreasons.append('backtracking: %s' % \
2371 ', '.join(sorted(backtrack_reasons)))
2372 backtrack_mask = True
2373 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2374 modified_use=self._pkg_use_enabled(pkg)):
2375 mreasons = ["exclude option"]
2377 masked_pkg_instances.add(pkg)
2378 if atom.unevaluated_atom.use:
# A package lands in missing_use if it lacks a required IUSE flag or
# its current USE violates the atom's USE conditionals.
2380 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2381 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2382 missing_use.append(pkg)
2386 writemsg("violated_conditionals raised " + \
2387 "InvalidAtom: '%s' parent: %s" % \
2388 (atom, myparent), noiselevel=-1)
2390 if not mreasons and \
2392 pkg.metadata["REQUIRED_USE"] and \
2393 eapi_has_required_use(pkg.metadata["EAPI"]):
2394 if not check_required_use(
2395 pkg.metadata["REQUIRED_USE"],
2396 self._pkg_use_enabled(pkg),
2397 pkg.iuse.is_valid_flag):
2398 required_use_unsatisfied.append(pkg)
2400 if pkg.built and not mreasons:
2401 mreasons = ["use flag configuration mismatch"]
2402 masked_packages.append(
2403 (root_config, pkgsettings, cpv, metadata, mreasons))
# In check_backtrack mode, signal a backtracking mask via exception
# instead of producing output.
2407 raise self._backtrack_mask()
# Pass 2: for USE-mismatched candidates, work out the concrete USE
# changes (enable/disable) that would satisfy the atom, with warnings
# when the change would violate REQUIRED_USE.
2411 missing_use_reasons = []
2412 missing_iuse_reasons = []
2413 for pkg in missing_use:
2414 use = self._pkg_use_enabled(pkg)
2416 #Use the unevaluated atom here, because some flags might have gone
2417 #lost during evaluation.
2418 required_flags = atom.unevaluated_atom.use.required
2419 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2423 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2424 missing_iuse_reasons.append((pkg, mreasons))
2426 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2427 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2429 untouchable_flags = \
2430 frozenset(chain(pkg.use.mask, pkg.use.force))
2431 if untouchable_flags.intersection(
2432 chain(need_enable, need_disable)):
2435 missing_use_adjustable.add(pkg)
2436 required_use = pkg.metadata["REQUIRED_USE"]
2437 required_use_warning = ""
2439 old_use = self._pkg_use_enabled(pkg)
2440 new_use = set(self._pkg_use_enabled(pkg))
2441 for flag in need_enable:
2443 for flag in need_disable:
2444 new_use.discard(flag)
2445 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2446 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2447 required_use_warning = ", this change violates use flag constraints " + \
2448 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2450 if need_enable or need_disable:
2452 changes.extend(colorize("red", "+" + x) \
2453 for x in need_enable)
2454 changes.extend(colorize("blue", "-" + x) \
2455 for x in need_disable)
2456 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2457 missing_use_reasons.append((pkg, mreasons))
# If the violated USE deps are all conditional, suggest changing the
# flags on the PARENT package instead of the child.
2459 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2460 # Lets see if the violated use deps are conditional.
2461 # If so, suggest to change them on the parent.
2463 # If the child package is masked then a change to
2464 # parent USE is not a valid solution (a normal mask
2465 # message should be displayed instead).
2466 if pkg in masked_pkg_instances:
2470 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2471 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2472 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2473 #all violated use deps are conditional
2475 conditional = violated_atom.use.conditional
2476 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2477 conditional.enabled, conditional.disabled))
2479 untouchable_flags = \
2480 frozenset(chain(myparent.use.mask, myparent.use.force))
2481 if untouchable_flags.intersection(involved_flags):
2484 required_use = myparent.metadata["REQUIRED_USE"]
2485 required_use_warning = ""
2487 old_use = self._pkg_use_enabled(myparent)
2488 new_use = set(self._pkg_use_enabled(myparent))
2489 for flag in involved_flags:
2491 new_use.discard(flag)
2494 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2495 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2496 required_use_warning = ", this change violates use flag constraints " + \
2497 "defined by %s: '%s'" % (myparent.cpv, \
2498 human_readable_required_use(required_use))
2500 for flag in involved_flags:
2501 if flag in self._pkg_use_enabled(myparent):
2502 changes.append(colorize("blue", "-" + flag))
2504 changes.append(colorize("red", "+" + flag))
2505 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2506 if (myparent, mreasons) not in missing_use_reasons:
2507 missing_use_reasons.append((myparent, mreasons))
# Decide what to display: prefer unmasked USE-change suggestions,
# then missing-IUSE notes, limited to the most relevant entries.
2509 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2510 in missing_use_reasons if pkg not in masked_pkg_instances]
2512 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2513 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2515 show_missing_use = False
2516 if unmasked_use_reasons:
2517 # Only show the latest version.
2518 show_missing_use = []
2520 parent_reason = None
2521 for pkg, mreasons in unmasked_use_reasons:
2523 if parent_reason is None:
2524 #This happens if a use change on the parent
2525 #leads to a satisfied conditional use dep.
2526 parent_reason = (pkg, mreasons)
2527 elif pkg_reason is None:
2528 #Don't rely on the first pkg in unmasked_use_reasons,
2529 #being the highest version of the dependency.
2530 pkg_reason = (pkg, mreasons)
2532 show_missing_use.append(pkg_reason)
2534 show_missing_use.append(parent_reason)
2536 elif unmasked_iuse_reasons:
2537 masked_with_iuse = False
2538 for pkg in masked_pkg_instances:
2539 #Use atom.unevaluated here, because some flags might have gone
2540 #lost during evaluation.
2541 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2542 # Package(s) with required IUSE are masked,
2543 # so display a normal masking message.
2544 masked_with_iuse = True
2546 if not masked_with_iuse:
2547 show_missing_use = unmasked_iuse_reasons
2549 if required_use_unsatisfied:
2550 # If there's a higher unmasked version in missing_use_adjustable
2551 # then we want to show that instead.
2552 for pkg in missing_use_adjustable:
2553 if pkg not in masked_pkg_instances and \
2554 pkg > required_use_unsatisfied[0]:
2555 required_use_unsatisfied = False
# Output section: exactly one of the following branches prints.
2560 if required_use_unsatisfied:
2561 # We have an unmasked package that only requires USE adjustment
2562 # in order to satisfy REQUIRED_USE, and nothing more. We assume
2563 # that the user wants the latest version, so only the first
2564 # instance is displayed.
2565 pkg = required_use_unsatisfied[0]
2566 output_cpv = pkg.cpv
2567 writemsg_stdout("\n!!! " + \
2568 colorize("BAD", "The ebuild selected to satisfy ") + \
2569 colorize("INFORM", xinfo) + \
2570 colorize("BAD", " has unmet requirements.") + "\n",
2572 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
2573 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
2575 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
2576 "are unsatisfied:\n", noiselevel=-1)
2577 reduced_noise = check_required_use(
2578 pkg.metadata["REQUIRED_USE"],
2579 self._pkg_use_enabled(pkg),
2580 pkg.iuse.is_valid_flag).tounicode()
2581 writemsg_stdout(" %s\n" % \
2582 human_readable_required_use(reduced_noise),
2584 normalized_required_use = \
2585 " ".join(pkg.metadata["REQUIRED_USE"].split())
2586 if reduced_noise != normalized_required_use:
2587 writemsg_stdout("\n The above constraints " + \
2588 "are a subset of the following complete expression:\n",
2590 writemsg_stdout(" %s\n" % \
2591 human_readable_required_use(normalized_required_use),
2593 writemsg_stdout("\n", noiselevel=-1)
2595 elif show_missing_use:
2596 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2597 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
2598 for pkg, mreasons in show_missing_use:
2599 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
2601 elif masked_packages:
2602 writemsg_stdout("\n!!! " + \
2603 colorize("BAD", "All ebuilds that could satisfy ") + \
2604 colorize("INFORM", xinfo) + \
2605 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
2606 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
2607 have_eapi_mask = show_masked_packages(masked_packages)
2609 writemsg_stdout("\n", noiselevel=-1)
2610 msg = ("The current version of portage supports " + \
2611 "EAPI '%s'. You must upgrade to a newer version" + \
2612 " of portage before EAPI masked packages can" + \
2613 " be installed.") % portage.const.EAPI
2614 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
2615 writemsg_stdout("\n", noiselevel=-1)
# No candidates at all: suggest similarly-named packages via difflib,
# matching first on full cp, then on package name alone.
2618 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2619 if isinstance(myparent, AtomArg):
2620 cp = myparent.atom.cp
2621 cat, pkg = portage.catsplit(cp)
2625 writemsg_stdout("\nemerge: searching for similar names..."
2629 all_cp.update(vardb.cp_all())
2630 all_cp.update(portdb.cp_all())
2631 if "--usepkg" in self._frozen_config.myopts:
2632 all_cp.update(bindb.cp_all())
2635 matches = difflib.get_close_matches(cp, all_cp)
2638 for other_cp in all_cp:
2639 other_pkg = portage.catsplit(other_cp)[1]
2640 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
2641 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
2643 for pkg_match in pkg_matches:
2644 matches.extend(pkg_to_cp[pkg_match])
2646 if len(matches) == 1:
2647 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
2649 elif len(matches) > 1:
2651 "\nemerge: Maybe you meant any of these: %s?\n" % \
2652 (", ".join(matches),), noiselevel=-1)
2654 # Generally, this would only happen if
2655 # all dbapis are empty.
2656 writemsg_stdout(" nothing similar found.\n"
# Finally, show the dependency chain that requested the atom (skipped
# for AtomArg parents, which are already shown as xinfo).
2659 if not isinstance(myparent, AtomArg):
2660 # It's redundant to show parent for AtomArg since
2661 # it's the same as 'xinfo' displayed above.
2662 dep_chain = self._get_dep_chain(myparent, atom)
2663 for node, node_type in dep_chain:
2664 msg.append('(dependency required by "%s" [%s])' % \
2665 (colorize('INFORM', _unicode_decode("%s") % \
2666 (node)), node_type))
2669 writemsg_stdout("\n".join(msg), noiselevel=-1)
2670 writemsg_stdout("\n", noiselevel=-1)
2674 writemsg_stdout("\n", noiselevel=-1)
2676 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
# Generator: iterate matching Package instances across ALL configured
# dbapis for this root (in dbs order), by delegating to _iter_match_pkgs
# once per pkg_type.
# NOTE(review): the `yield pkg` body (original lines 2681-2682) is
# missing from this numbered paste — without it the inner loop would do
# nothing; verify against the pristine file.
2677 for db, pkg_type, built, installed, db_keys in \
2678 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2679 for pkg in self._iter_match_pkgs(root_config,
2680 pkg_type, atom, onlydeps=onlydeps):
2683 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
# NOTE(review): numbered paste with gaps — docstring delimiters
# (2684/2689), the `else:` branches, `continue` lines, the final
# `yield pkg` and the multislot fallback's match/append handling are
# missing; comments below describe only what is visible.
2685 Iterate over Package instances of pkg_type matching the given atom.
2686 This does not check visibility and it also does not match USE for
2687 unbuilt ebuilds since USE are lazily calculated after visibility
2688 checks (to avoid the expense when possible).
2691 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
# xmatch("match-all") is the portdb fast path; plain match() otherwise.
2693 if hasattr(db, "xmatch"):
2694 cpv_list = db.xmatch("match-all", atom)
2696 cpv_list = db.match(atom)
2698 # USE=multislot can make an installed package appear as if
2699 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2700 # won't do any good as long as USE=multislot is enabled since
2701 # the newly built package still won't have the expected slot.
2702 # Therefore, assume that such SLOT dependencies are already
2703 # satisfied rather than forcing a rebuild.
2704 installed = pkg_type == 'installed'
# Multislot fallback: for installed packages with a slot dep that found
# no match, accept an installed instance whose slot exists in no other
# db, provided it matches the atom once the slot is stripped.
2705 if installed and not cpv_list and atom.slot:
2706 for cpv in db.match(atom.cp):
2707 slot_available = False
2708 for other_db, other_type, other_built, \
2709 other_installed, other_keys in \
2710 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2713 other_db.aux_get(cpv, ["SLOT"])[0]:
2714 slot_available = True
2718 if not slot_available:
2720 inst_pkg = self._pkg(cpv, "installed",
2721 root_config, installed=installed)
2722 # Remove the slot from the atom and verify that
2723 # the package matches the resulting atom.
2724 if portage.match_from_list(
2725 atom.without_slot, [inst_pkg]):
# Main yield loop: build Package instances, skipping unfetchable cpvs
# and false old-style virtual matches.
2733 for cpv in cpv_list:
2735 pkg = self._pkg(cpv, pkg_type, root_config,
2736 installed=installed, onlydeps=onlydeps)
2737 except portage.exception.PackageNotFound:
2740 if pkg.cp != atom.cp:
2741 # A cpv can be returned from dbapi.match() as an
2742 # old-style virtual match even in cases when the
2743 # package does not actually PROVIDE the virtual.
2744 # Filter out any such false matches here.
2745 if not InternalPackageSet(initial_atoms=(atom,)
2746 ).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
2750 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Purpose: memoized front-end for package selection. Returns the cached
# (pkg, existing_node) result when available, otherwise computes it via
# _select_pkg_highest_available_imp and caches it. Visible packages are
# additionally injected into _visible_pkgs.
# NOTE(review): numbered paste with gaps — the cache-hit branch header
# (2753-2754), the `ret = pkg, existing` update (2760), the early
# `return ret` lines, and the `pkg, existing = ret` unpack (2765-2766)
# are missing here.
2751 cache_key = (root, atom, onlydeps)
2752 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
2755 if pkg and not existing:
2756 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2757 if existing and existing == pkg:
2758 # Update the cache to reflect that the
2759 # package has been added to the graph.
2761 self._dynamic_config._highest_pkg_cache[cache_key] = ret
2763 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2764 self._dynamic_config._highest_pkg_cache[cache_key] = ret
2767 settings = pkg.root_config.settings
# Track visible, unmasked packages for later visibility queries.
2768 if self._pkg_visibility_check(pkg) and \
2769 not (pkg.installed and pkg.masks):
2770 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
2773 def _want_installed_pkg(self, pkg):
# Purpose: decide whether an installed package may be kept rather than
# replaced — True unless the user explicitly requested it via an
# argument atom (checked by probing _iter_atoms_for_pkg).
# NOTE(review): numbered paste with gaps — docstring delimiters
# (2774/2778), the `try:` header (2781), and the `return True` /
# `return False` lines are missing here.
2775 Given an installed package returned from select_pkg, return
2776 True if the user has not explicitly requested for this package
2777 to be replaced (typically via an atom on the command line).
2779 if "selective" not in self._dynamic_config.myparams and \
2780 pkg.root == self._frozen_config.target_root:
# An argument atom matching pkg means the user asked for it explicitly.
2782 next(self._iter_atoms_for_pkg(pkg))
2783 except StopIteration:
2785 except portage.exception.InvalidDependString:
2791 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Purpose: select the highest available package for `atom`, optionally
# retrying with autounmask relaxations (USE changes first, then also
# unstable keywords and license changes) when the plain selection fails
# or yields an installed package the user wants replaced.
# NOTE(review): numbered paste with gaps — the `if pkg is None` guard
# (around 2795-2801), the early `break` lines inside the retry loop, and
# the restart-branch body (2819-2821) are missing here.
2792 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2794 default_selection = (pkg, existing)
2796 if self._dynamic_config._autounmask is True:
2797 if pkg is not None and \
2799 not self._want_installed_pkg(pkg):
# Retry: first allowing only USE changes, then additionally allowing
# unstable keywords and license changes.
2802 for only_use_changes in True, False:
2807 self._wrapped_select_pkg_highest_available_imp(
2808 root, atom, onlydeps=onlydeps,
2809 allow_use_changes=True,
2810 allow_unstable_keywords=(not only_use_changes),
2811 allow_license_changes=(not only_use_changes))
2813 if pkg is not None and \
2815 not self._want_installed_pkg(pkg):
2818 if self._dynamic_config._need_restart:
2822 # This ensures that we can fall back to an installed package
2823 # that may have been rejected in the autounmask path above.
2824 return default_selection
2826 return pkg, existing
2828 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False):
# Purpose: check whether pkg is (or can be made) visible. When the pkg
# is masked only by unstable keywords and/or missing licenses, and the
# corresponding allow_* flag permits it, record the needed change in
# _dynamic_config (_needed_unstable_keywords / _needed_license_changes)
# and treat the pkg as visible. Masking for any other reason fails.
# NOTE(review): numbered paste with gaps — the initial `if pkg.visible:`
# fast path (around 2829-2832), the `return` statements (2834-2835,
# 2857-2858, 2869-2870, 2874-2875, 2881-2883), and the hint-is-None
# branch (2846-2847) are missing here.
2833 if self._frozen_config.myopts.get('--autounmask', 'n') is not True:
2836 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
2837 root_config = self._frozen_config.roots[pkg.root]
2838 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
2840 masked_by_unstable_keywords = False
2841 missing_licenses = None
2842 masked_by_something_else = False
# Classify each mask reason via its unmask hint; only "unstable keyword"
# and "license" masks are considered fixable.
2844 for reason in mreasons:
2845 hint = reason.unmask_hint
2848 masked_by_something_else = True
2849 elif hint.key == "unstable keyword":
2850 masked_by_unstable_keywords = True
2851 elif hint.key == "license":
2852 missing_licenses = hint.value
2854 masked_by_something_else = True
2856 if masked_by_something_else:
2859 if pkg in self._dynamic_config._needed_unstable_keywords:
2860 #If the package is already keyworded, remove the mask.
2861 masked_by_unstable_keywords = False
2863 if missing_licenses:
2864 #If the needed licenses are already unmasked, remove the mask.
2865 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
2867 if not (masked_by_unstable_keywords or missing_licenses):
2868 #Package has already been unmasked.
2871 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
2872 (missing_licenses and not allow_license_changes):
2873 #We are not allowed to do the needed changes.
# Record the configuration changes the user would have to make.
2876 if masked_by_unstable_keywords:
2877 self._dynamic_config._needed_unstable_keywords.add(pkg)
2879 if missing_licenses:
2880 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
2884 def _pkg_use_enabled(self, pkg, target_use=None):
# Purpose: report pkg's effective enabled USE set, taking autounmask USE
# changes into account. With target_use given, compute and record the
# USE changes needed to reach that state (refusing changes that violate
# REQUIRED_USE), possibly flagging a depgraph restart.
# NOTE(review): numbered paste with gaps — docstring delimiters
# (2885/2890), several `else:`/`continue` lines inside the flag loop
# (2906-2909, 2914, 2916-2918, 2920), the `new_use = set()` style
# initializer, the `return True/False` lines of the nested helper, and
# the final `return new_use` (around 2960-2961) are missing here.
2886 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
2887 If target_use is given, the need changes are computed to make the package useable.
2888 Example: target_use = { "foo": True, "bar": False }
2889 The flags target_use must be in the pkg's IUSE.
2891 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query mode: return the recorded effective USE (or the plain enabled
# set when no change has been recorded).
2893 if target_use is None:
2894 if needed_use_config_change is None:
2895 return pkg.use.enabled
2897 return needed_use_config_change[0]
# Change mode: start from any previously recorded (use, changes) pair.
2899 if needed_use_config_change is not None:
2900 old_use = needed_use_config_change[0]
2902 old_changes = needed_use_config_change[1]
2903 new_changes = old_changes.copy()
2905 old_use = pkg.use.enabled
# Merge target_use into the change map, flag by flag.
2910 for flag, state in target_use.items():
2912 if flag not in old_use:
2913 if new_changes.get(flag) == False:
2915 new_changes[flag] = True
2919 if new_changes.get(flag) == True:
2921 new_changes[flag] = False
2922 new_use.update(old_use.difference(target_use))
# Helper: a USE change warrants a restart only if it alters the reduced
# dep strings of a pkg already in the graph, or an in-graph parent's
# atom involves a changed flag.
2924 def want_restart_for_use_change(pkg, new_use):
2925 if pkg not in self._dynamic_config.digraph.nodes:
2928 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
2929 dep = pkg.metadata[key]
2930 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2931 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2933 if old_val != new_val:
2936 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
2937 if not parent_atoms:
2940 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
2941 for ppkg, atom in parent_atoms:
2942 if not atom.use or \
2943 not atom.use.required.intersection(changes):
2950 if new_changes != old_changes:
2951 #Don't do the change if it violates REQUIRED_USE.
2952 required_use = pkg.metadata["REQUIRED_USE"]
2953 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2954 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2957 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
2958 if want_restart_for_use_change(pkg, new_use):
2959 self._dynamic_config._need_restart = True
# Core package-selection routine: scan all candidate databases (ebuild,
# binary, installed) for the highest acceptable match of `atom` in `root`,
# honoring masking, USE deps, --usepkg/--usepkgonly/--newuse/--noreplace
# and the rebuilt_binaries parameter. Returns (pkg, existing_node).
# NOTE(review): many interior lines are elided here (embedded-number
# gaps); code is kept byte-identical to the extracted text.
2962 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
2963 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False):
2964 root_config = self._frozen_config.roots[root]
2965 pkgsettings = self._frozen_config.pkgsettings[root]
2966 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2967 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2968 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2969 # List of acceptable packages, ordered by type preference.
2970 matched_packages = []
2971 matched_pkgs_ignore_use = []
2972 highest_version = None
2973 if not isinstance(atom, portage.dep.Atom):
2974 atom = portage.dep.Atom(atom)
2976 atom_set = InternalPackageSet(initial_atoms=(atom,))
2977 existing_node = None
# Cache option/parameter lookups once before the scan loops.
2979 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
2980 usepkg = "--usepkg" in self._frozen_config.myopts
2981 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2982 empty = "empty" in self._dynamic_config.myparams
2983 selective = "selective" in self._dynamic_config.myparams
2985 noreplace = "--noreplace" in self._frozen_config.myopts
2986 avoid_update = "--update" not in self._frozen_config.myopts
2987 dont_miss_updates = "--update" in self._frozen_config.myopts
2988 use_ebuild_visibility = self._frozen_config.myopts.get(
2989 '--use-ebuild-visibility', 'n') != 'n'
2990 # Behavior of the "selective" parameter depends on
2991 # whether or not a package matches an argument atom.
2992 # If an installed package provides an old-style
2993 # virtual that is no longer provided by an available
2994 # package, the installed package may match an argument
2995 # atom even though none of the available packages do.
2996 # Therefore, "selective" logic does not consider
2997 # whether or not an installed package matches an
2998 # argument atom. It only considers whether or not
2999 # available packages match argument atoms, which is
3000 # represented by the found_available_arg flag.
3001 found_available_arg = False
3002 packages_with_invalid_use_config = []
# Two passes: first prefer packages already in the graph, then anything.
3003 for find_existing_node in True, False:
3006 for db, pkg_type, built, installed, db_keys in dbs:
3009 if installed and not find_existing_node:
3010 want_reinstall = reinstall or empty or \
3011 (found_available_arg and not selective)
3012 if want_reinstall and matched_packages:
3015 # Ignore USE deps for the initial match since we want to
3016 # ensure that updates aren't missed solely due to the user's
3017 # USE configuration.
3018 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3020 if pkg in self._dynamic_config._runtime_pkg_mask:
3021 # The package has been masked by the backtracking logic
3024 if not pkg.installed and \
3025 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3026 modified_use=self._pkg_use_enabled(pkg)):
3029 if packages_with_invalid_use_config and \
3030 (not pkg.installed or dont_miss_updates):
3031 # Check if a higher version was rejected due to user
3032 # USE configuration. The packages_with_invalid_use_config
3033 # list only contains unbuilt ebuilds since USE can't
3034 # be changed for built packages.
3035 higher_version_rejected = False
3036 for rejected in packages_with_invalid_use_config:
3037 if rejected.cp != pkg.cp:
3040 higher_version_rejected = True
3042 if higher_version_rejected:
3046 # Make --noreplace take precedence over --newuse.
3047 if not pkg.installed and noreplace and \
3048 cpv in vardb.match(atom):
3049 inst_pkg = self._pkg(pkg.cpv, "installed",
3050 root_config, installed=True)
3051 if inst_pkg.visible:
3052 # If the installed version is masked, it may
3053 # be necessary to look at lower versions,
3054 # in case there is a visible downgrade.
3056 reinstall_for_flags = None
3058 if not pkg.installed or \
3059 (matched_packages and not avoid_update):
3060 # Only enforce visibility on installed packages
3061 # if there is at least one other visible package
3062 # available. By filtering installed masked packages
3063 # here, packages that have been masked since they
3064 # were installed can be automatically downgraded
3065 # to an unmasked version. NOTE: This code needs to
3066 # be consistent with masking behavior inside
3067 # _dep_check_composite_db, in order to prevent
3068 # incorrect choices in || deps like bug #351828.
3070 if not self._pkg_visibility_check(pkg, \
3071 allow_unstable_keywords=allow_unstable_keywords,
3072 allow_license_changes=allow_license_changes):
3075 # Enable upgrade or downgrade to a version
3076 # with visible KEYWORDS when the installed
3077 # version is masked by KEYWORDS, but never
3078 # reinstall the same exact version only due
3079 # to a KEYWORDS mask. See bug #252167.
3081 if pkg.type_name != "ebuild" and matched_packages:
3082 # Don't re-install a binary package that is
3083 # identical to the currently installed package
3084 # (see bug #354441).
3085 identical_binary = False
3086 if usepkg and pkg.installed:
3087 for selected_pkg in matched_packages:
3088 if selected_pkg.type_name == "binary" and \
3089 selected_pkg.cpv == pkg.cpv and \
3090 selected_pkg.metadata.get('BUILD_TIME') == \
3091 pkg.metadata.get('BUILD_TIME'):
3092 identical_binary = True
3095 if not identical_binary:
3096 # If the ebuild no longer exists or it's
3097 # keywords have been dropped, reject built
3098 # instances (installed or binary).
3099 # If --usepkgonly is enabled, assume that
3100 # the ebuild status should be ignored.
3101 if not use_ebuild_visibility and usepkgonly:
3102 if pkg.installed and pkg.masks:
3107 pkg.cpv, "ebuild", root_config)
3108 except portage.exception.PackageNotFound:
3111 if not self._pkg_visibility_check(pkg_eb, \
3112 allow_unstable_keywords=allow_unstable_keywords,
3113 allow_license_changes=allow_license_changes):
3116 # Calculation of USE for unbuilt ebuilds is relatively
3117 # expensive, so it is only performed lazily, after the
3118 # above visibility checks are complete.
3121 if root == self._frozen_config.target_root:
3123 myarg = next(self._iter_atoms_for_pkg(pkg))
3124 except StopIteration:
3126 except portage.exception.InvalidDependString:
3128 # masked by corruption
3130 if not installed and myarg:
3131 found_available_arg = True
3133 if atom.unevaluated_atom.use:
3134 #Make sure we don't miss a 'missing IUSE'.
3135 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3136 # Don't add this to packages_with_invalid_use_config
3137 # since IUSE cannot be adjusted by the user.
3142 matched_pkgs_ignore_use.append(pkg)
# When USE changes are allowed, compute the adjusted USE toward the
# atom's enabled/disabled requirements via _pkg_use_enabled.
3143 if allow_use_changes:
3145 for flag in atom.use.enabled:
3146 target_use[flag] = True
3147 for flag in atom.use.disabled:
3148 target_use[flag] = False
3149 use = self._pkg_use_enabled(pkg, target_use)
3151 use = self._pkg_use_enabled(pkg)
# USE-dep mismatch analysis: decide whether the user could adjust
# USE (only for unbuilt packages, and not past use.mask/use.force).
3154 can_adjust_use = not pkg.built
3155 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3156 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3158 if atom.use.enabled:
3159 if atom.use.enabled.intersection(missing_disabled):
3161 can_adjust_use = False
3162 need_enabled = atom.use.enabled.difference(use)
3164 need_enabled = need_enabled.difference(missing_enabled)
3168 if pkg.use.mask.intersection(need_enabled):
3169 can_adjust_use = False
3171 if atom.use.disabled:
3172 if atom.use.disabled.intersection(missing_enabled):
3174 can_adjust_use = False
3175 need_disabled = atom.use.disabled.intersection(use)
3177 need_disabled = need_disabled.difference(missing_disabled)
3181 if pkg.use.force.difference(
3182 pkg.use.mask).intersection(need_disabled):
3183 can_adjust_use = False
3187 # Above we must ensure that this package has
3188 # absolutely no use.force, use.mask, or IUSE
3189 # issues that the user typically can't make
3190 # adjustments to solve (see bug #345979).
3191 # FIXME: Conditional USE deps complicate
3192 # issues. This code currently excludes cases
3193 # in which the user can adjust the parent
3194 # package's USE in order to satisfy the dep.
3195 packages_with_invalid_use_config.append(pkg)
# Track the highest version seen for this cp across repos.
3198 if pkg.cp == atom_cp:
3199 if highest_version is None:
3200 highest_version = pkg
3201 elif pkg > highest_version:
3202 highest_version = pkg
3203 # At this point, we've found the highest visible
3204 # match from the current repo. Any lower versions
3205 # from this repo are ignored, so this so the loop
3206 # will always end with a break statement below
3208 if find_existing_node:
3209 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3212 # Use PackageSet.findAtomForPackage()
3213 # for PROVIDE support.
3214 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3215 if highest_version and \
3216 e_pkg.cp == atom_cp and \
3217 e_pkg < highest_version and \
3218 e_pkg.slot_atom != highest_version.slot_atom:
3219 # There is a higher version available in a
3220 # different slot, so this existing node is
3224 matched_packages.append(e_pkg)
3225 existing_node = e_pkg
3227 # Compare built package to current config and
3228 # reject the built package if necessary.
3229 if built and (not installed or matched_pkgs_ignore_use) and \
3230 ("--newuse" in self._frozen_config.myopts or \
3231 "--reinstall" in self._frozen_config.myopts or \
3232 "--binpkg-respect-use" in self._frozen_config.myopts):
3233 iuses = pkg.iuse.all
3234 old_use = self._pkg_use_enabled(pkg)
3236 pkgsettings.setcpv(myeb)
3238 pkgsettings.setcpv(pkg)
3239 now_use = pkgsettings["PORTAGE_USE"].split()
3240 forced_flags = set()
3241 forced_flags.update(pkgsettings.useforce)
3242 forced_flags.update(pkgsettings.usemask)
3244 if myeb and not usepkgonly:
3245 cur_iuse = myeb.iuse.all
3246 if self._reinstall_for_flags(forced_flags,
3250 # Compare current config to installed package
3251 # and do not reinstall if possible.
3252 if not installed and \
3253 ("--newuse" in self._frozen_config.myopts or \
3254 "--reinstall" in self._frozen_config.myopts) and \
3255 cpv in vardb.match(atom):
3256 forced_flags = set()
3257 forced_flags.update(pkg.use.force)
3258 forced_flags.update(pkg.use.mask)
3259 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3260 old_use = inst_pkg.use.enabled
3261 old_iuse = inst_pkg.iuse.all
3262 cur_use = self._pkg_use_enabled(pkg)
3263 cur_iuse = pkg.iuse.all
3264 reinstall_for_flags = \
3265 self._reinstall_for_flags(
3266 forced_flags, old_use, old_iuse,
3268 if reinstall_for_flags:
3272 matched_packages.append(pkg)
3273 if reinstall_for_flags:
3274 self._dynamic_config._reinstall_nodes[pkg] = \
3278 if not matched_packages:
3281 if "--debug" in self._frozen_config.myopts:
3282 for pkg in matched_packages:
3283 portage.writemsg("%s %s\n" % \
3284 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3286 # Filter out any old-style virtual matches if they are
3287 # mixed with new-style virtual matches.
3289 if len(matched_packages) > 1 and \
3290 "virtual" == portage.catsplit(cp)[0]:
3291 for pkg in matched_packages:
3294 # Got a new-style virtual, so filter
3295 # out any old-style virtuals.
3296 matched_packages = [pkg for pkg in matched_packages \
# Prefer a node already in the graph when it is among the matches.
3300 if existing_node is not None and \
3301 existing_node in matched_packages:
3302 return existing_node, existing_node
3304 if len(matched_packages) > 1:
3305 if rebuilt_binaries:
3308 for pkg in matched_packages:
3313 if built_pkg is not None and inst_pkg is not None:
3314 # Only reinstall if binary package BUILD_TIME is
3315 # non-empty, in order to avoid cases like to
3316 # bug #306659 where BUILD_TIME fields are missing
3317 # in local and/or remote Packages file.
3319 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3320 except (KeyError, ValueError):
3324 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3325 except (KeyError, ValueError):
3326 installed_timestamp = 0
3328 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3329 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3330 if built_timestamp and \
3331 built_timestamp > installed_timestamp and \
3332 built_timestamp >= minimal_timestamp:
3333 return built_pkg, existing_node
3335 #Don't care if the binary has an older BUILD_TIME than the installed
3336 #package. This is for closely tracking a binhost.
3337 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3339 if built_timestamp and \
3340 built_timestamp != installed_timestamp:
3341 return built_pkg, existing_node
# Drop invalid installed packages when alternatives exist.
3343 for pkg in matched_packages:
3344 if pkg.installed and pkg.invalid:
3345 matched_packages = [x for x in \
3346 matched_packages if x is not pkg]
3349 for pkg in matched_packages:
3350 if pkg.installed and self._pkg_visibility_check(pkg, \
3351 allow_unstable_keywords=allow_unstable_keywords,
3352 allow_license_changes=allow_license_changes):
3353 return pkg, existing_node
3355 bestmatch = portage.best(
3356 [pkg.cpv for pkg in matched_packages \
3357 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3358 allow_license_changes=allow_license_changes)])
3360 # all are masked, so ignore visibility
3361 bestmatch = portage.best(
3362 [pkg.cpv for pkg in matched_packages])
3363 matched_packages = [pkg for pkg in matched_packages \
3364 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3366 # ordered by type preference ("ebuild" type is the last resort)
3367 return matched_packages[-1], existing_node
# Graph-restricted selection used by _complete_graph: match only against
# the graph trees' porttree dbapi, so the answer is limited to packages
# already in the graph (or installed, per the docstring fragment below).
# NOTE(review): docstring delimiters and the no-match guard are elided
# from this extract; code kept byte-identical.
3369 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3371 Select packages that have already been added to the graph or
3372 those that are installed and have not been scheduled for
3375 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3376 matches = graph_db.match_pkgs(atom)
# match_pkgs returns sorted results, so the last entry is the highest.
3379 pkg = matches[-1] # highest match
3380 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3381 return pkg, in_graph
# Installed-only selection used by _complete_graph in "remove" mode:
# match against the graph trees' vartree dbapi, preferring an unmasked
# candidate when the visibility filters narrow the matches down to one.
# NOTE(review): several lines (docstring delimiters, empty-match guard,
# the narrowing assignments) are elided; code kept byte-identical.
3383 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
3385 Select packages that are installed.
3387 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3388 matches = vardb.match_pkgs(atom)
3391 if len(matches) > 1:
# First try to disambiguate via the full visibility check...
3392 unmasked = [pkg for pkg in matches if \
3393 self._pkg_visibility_check(pkg)]
3395 if len(unmasked) == 1:
3398 # Account for packages with masks (like KEYWORDS masks)
3399 # that are usually ignored in visibility checks for
3400 # installed packages, in order to handle cases like
# ...then fall back to filtering out packages carrying any mask.
3402 unmasked = [pkg for pkg in matches if not pkg.masks]
3405 pkg = matches[-1] # highest match
3406 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3407 return pkg, in_graph
# Pull deep dependencies of the required sets into the graph so that
# initially-satisfied deps are not broken by the planned changes; runs
# a restricted re-resolution using graph/installed-only selection.
# NOTE(review): numerous lines are elided from this extract (docstring
# quotes, early returns, loop bodies); code kept byte-identical.
3409 def _complete_graph(self, required_sets=None):
3411 Add any deep dependencies of required sets (args, system, world) that
3412 have not been pulled into the graph yet. This ensures that the graph
3413 is consistent such that initially satisfied deep dependencies are not
3414 broken in the new graph. Initially unsatisfied dependencies are
3415 irrelevant since we only want to avoid breaking dependencies that are
3416 initially satisfied.
3418 Since this method can consume enough time to disturb users, it is
3419 currently only enabled by the --complete-graph option.
3421 @param required_sets: contains required sets (currently only used
3422 for depclean and prune removal operations)
3423 @type required_sets: dict
3425 if "--buildpkgonly" in self._frozen_config.myopts or \
3426 "recurse" not in self._dynamic_config.myparams:
3429 if "complete" not in self._dynamic_config.myparams:
3430 # Automatically enable complete mode if there are any
3431 # downgrades, since they often break dependencies
3432 # (like in bug #353613).
3433 have_downgrade = False
3434 for node in self._dynamic_config.digraph:
3435 if not isinstance(node, Package) or \
3436 node.operation != "merge":
3438 vardb = self._frozen_config.roots[
3439 node.root].trees["vartree"].dbapi
3440 inst_pkg = vardb.match_pkgs(node.slot_atom)
# A strictly higher installed version for this slot means the planned
# merge is a downgrade.
3441 if inst_pkg and inst_pkg[0] > node:
3442 have_downgrade = True
3446 self._dynamic_config.myparams["complete"] = True
3448 # Skip complete graph mode, in order to avoid consuming
3449 # enough time to disturb users.
3454 # Put the depgraph into a mode that causes it to only
3455 # select packages that have already been added to the
3456 # graph or those that are installed and have not been
3457 # scheduled for replacement. Also, toggle the "deep"
3458 # parameter so that all dependencies are traversed and
3460 self._select_atoms = self._select_atoms_from_graph
3461 if "remove" in self._dynamic_config.myparams:
3462 self._select_package = self._select_pkg_from_installed
3464 self._select_package = self._select_pkg_from_graph
3465 already_deep = self._dynamic_config.myparams.get("deep") is True
3466 if not already_deep:
3467 self._dynamic_config.myparams["deep"] = True
3469 # Invalidate the package selection cache, since
3470 # _select_package has just changed implementations.
3471 for trees in self._dynamic_config._filtered_trees.values():
3472 trees["porttree"].dbapi._clear_cache()
3474 args = self._dynamic_config._initial_arg_list[:]
3475 for root in self._frozen_config.roots:
3476 if root != self._frozen_config.target_root and \
3477 "remove" in self._dynamic_config.myparams:
3478 # Only pull in deps for the relevant root.
3480 depgraph_sets = self._dynamic_config.sets[root]
3481 required_set_names = self._frozen_config._required_set_names.copy()
3482 remaining_args = required_set_names.copy()
3483 if required_sets is None or root not in required_sets:
3486 # Removal actions may override sets with temporary
3487 # replacements that have had atoms removed in order
3488 # to implement --deselect behavior.
3489 required_set_names = set(required_sets[root])
3490 depgraph_sets.sets.clear()
3491 depgraph_sets.sets.update(required_sets[root])
3492 if "remove" not in self._dynamic_config.myparams and \
3493 root == self._frozen_config.target_root and \
3494 (already_deep or "empty" in self._dynamic_config.myparams):
3495 remaining_args.difference_update(depgraph_sets.sets)
3496 if not remaining_args and \
3497 not self._dynamic_config._ignored_deps and \
3498 not self._dynamic_config._dep_stack:
3500 root_config = self._frozen_config.roots[root]
3501 for s in required_set_names:
3502 pset = depgraph_sets.sets.get(s)
3504 pset = root_config.sets[s]
3505 atom = SETPREFIX + s
3506 args.append(SetArg(arg=atom, pset=pset,
3507 root_config=root_config))
# Seed the dep stack from the (expanded) set arguments.
3509 self._set_args(args)
3510 for arg in self._expand_set_args(args, add_to_digraph=True):
3511 for atom in arg.pset.getAtoms():
3512 self._dynamic_config._dep_stack.append(
3513 Dependency(atom=atom, root=arg.root_config.root,
3517 if self._dynamic_config._ignored_deps:
3518 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
3519 self._dynamic_config._ignored_deps = []
3520 if not self._create_graph(allow_unsatisfied=True):
3522 # Check the unsatisfied deps to see if any initially satisfied deps
3523 # will become unsatisfied due to an upgrade. Initially unsatisfied
3524 # deps are irrelevant since we only want to avoid breaking deps
3525 # that are initially satisfied.
3526 while self._dynamic_config._unsatisfied_deps:
3527 dep = self._dynamic_config._unsatisfied_deps.pop()
3528 vardb = self._frozen_config.roots[
3529 dep.root].trees["vartree"].dbapi
3530 matches = vardb.match_pkgs(dep.atom)
3532 self._dynamic_config._initially_unsatisfied_deps.append(dep)
3534 # An scheduled installation broke a deep dependency.
3535 # Add the installed package to the graph so that it
3536 # will be appropriately reported as a slot collision
3537 # (possibly solvable via backtracking).
3538 pkg = matches[-1] # highest match
3539 if not self._add_pkg(pkg, dep):
3541 if not self._create_graph(allow_unsatisfied=True):
# Fetch (or lazily construct and cache) the Package instance for
# (type_name, root, cpv, operation). Raises PackageNotFound when the
# backing dbapi has no metadata for cpv.
# NOTE(review): the signature continuation line, docstring close, and
# the try wrapping aux_get are elided from this extract; code kept
# byte-identical.
3545 def _pkg(self, cpv, type_name, root_config, installed=False,
3548 Get a package instance from the cache, or create a new
3549 one if necessary. Raises PackageNotFound from aux_get if it
3550 failures for some reason (package does not exist or is
# Installed and onlydeps packages are never merged, hence "nomerge".
3554 if installed or onlydeps:
3555 operation = "nomerge"
3556 # Ensure that we use the specially optimized RootConfig instance
3557 # that refers to FakeVartree instead of the real vartree.
3558 root_config = self._frozen_config.roots[root_config.root]
3559 pkg = self._frozen_config._pkg_cache.get(
3560 (type_name, root_config.root, cpv, operation))
3561 if pkg is None and onlydeps and not installed:
3562 # Maybe it already got pulled in as a "merge" node.
3563 pkg = self._dynamic_config.mydbapi[root_config.root].get(
3564 (type_name, root_config.root, cpv, 'merge'))
3567 tree_type = self.pkg_tree_map[type_name]
3568 db = root_config.trees[tree_type].dbapi
# Restrict metadata to the keys the original tree's aux cache knows.
3569 db_keys = list(self._frozen_config._trees_orig[root_config.root][
3570 tree_type].dbapi._aux_cache_keys)
3572 metadata = zip(db_keys, db.aux_get(cpv, db_keys))
3574 raise portage.exception.PackageNotFound(cpv)
3575 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
3576 installed=installed, metadata=metadata, onlydeps=onlydeps,
3577 root_config=root_config, type_name=type_name)
3578 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked solely by LICENSE,
# so ACCEPT_LICENSE suggestions can reference it later.
3580 if not self._pkg_visibility_check(pkg) and \
3581 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
3582 slot_key = (pkg.root, pkg.slot_atom)
3583 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
3584 if other_pkg is None or pkg > other_pkg:
3585 self._frozen_config._highest_license_masked[slot_key] = pkg
# NOTE(review): this extract has many elided lines (embedded-number
# gaps: loop headers around 3622-3623, branches 3641-3656, etc.); code
# below is kept byte-identical to the extracted text.
3589 def _validate_blockers(self):
3590 """Remove any blockers from the digraph that do not match any of the
3591 packages within the graph. If necessary, create hard deps to ensure
3592 correct merge order such that mutually blocking packages are never
3593 installed simultaneously."""
3595 if "--buildpkgonly" in self._frozen_config.myopts or \
3596 "--nodeps" in self._frozen_config.myopts:
3599 complete = "complete" in self._dynamic_config.myparams
3600 deep = "deep" in self._dynamic_config.myparams
3603 # Pull in blockers from all installed packages that haven't already
3604 # been pulled into the depgraph. This is not enabled by default
3605 # due to the performance penalty that is incurred by all the
3606 # additional dep_check calls that are required.
3608 # For installed packages, always ignore blockers from DEPEND since
3609 # only runtime dependencies should be relevant for packages that
3610 # are already built.
3611 dep_keys = ["RDEPEND", "PDEPEND"]
# Phase 1: collect blocker atoms per installed package, using the
# persistent BlockerCache keyed by COUNTER to avoid dep_check calls.
3612 for myroot in self._frozen_config.trees:
3613 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
3614 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
3615 pkgsettings = self._frozen_config.pkgsettings[myroot]
3616 root_config = self._frozen_config.roots[myroot]
3617 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
3618 final_db = self._dynamic_config.mydbapi[myroot]
3620 blocker_cache = BlockerCache(myroot, vardb)
3621 stale_cache = set(blocker_cache)
3624 stale_cache.discard(cpv)
3625 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
3627 pkg in self._dynamic_config._traversed_pkg_deps
3629 # Check for masked installed packages. Only warn about
3630 # packages that are in the graph in order to avoid warning
3631 # about those that will be automatically uninstalled during
3632 # the merge process or by --depclean. Always warn about
3633 # packages masked by license, since the user likely wants
3634 # to adjust ACCEPT_LICENSE.
3636 if not self._pkg_visibility_check(pkg) and \
3637 (pkg_in_graph or 'LICENSE' in pkg.masks):
3638 self._dynamic_config._masked_installed.add(pkg)
3640 self._check_masks(pkg)
3642 blocker_atoms = None
3648 self._dynamic_config._blocker_parents.child_nodes(pkg))
3653 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
3657 # Select just the runtime blockers.
3658 blockers = [blocker for blocker in blockers \
3659 if blocker.priority.runtime or \
3660 blocker.priority.runtime_post]
3661 if blockers is not None:
3662 blockers = set(blocker.atom for blocker in blockers)
3664 # If this node has any blockers, create a "nomerge"
3665 # node for it so that they can be enforced.
3666 self._spinner_update()
3667 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cached entry belongs to a previous
# build of this cpv; discard it.
3668 if blocker_data is not None and \
3669 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3672 # If blocker data from the graph is available, use
3673 # it to validate the cache and update the cache if
3675 if blocker_data is not None and \
3676 blockers is not None:
3677 if not blockers.symmetric_difference(
3678 blocker_data.atoms):
3682 if blocker_data is None and \
3683 blockers is not None:
3684 # Re-use the blockers from the graph.
3685 blocker_atoms = sorted(blockers)
3686 counter = long(pkg.metadata["COUNTER"])
3688 blocker_cache.BlockerData(counter, blocker_atoms)
3689 blocker_cache[pkg.cpv] = blocker_data
3693 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
3695 # Use aux_get() to trigger FakeVartree global
3696 # updates on *DEPEND when appropriate.
3697 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3698 # It is crucial to pass in final_db here in order to
3699 # optimize dep_check calls by eliminating atoms via
3700 # dep_wordreduce and dep_eval calls.
3702 success, atoms = portage.dep_check(depstr,
3703 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
3704 trees=self._dynamic_config._graph_trees, myroot=myroot)
3707 except Exception as e:
3708 # This is helpful, for example, if a ValueError
3709 # is thrown from cpv_expand due to multiple
3710 # matches (this can happen if an atom lacks a
3712 show_invalid_depstring_notice(
3713 pkg, depstr, str(e))
3717 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3718 if replacement_pkg and \
3719 replacement_pkg[0].operation == "merge":
3720 # This package is being replaced anyway, so
3721 # ignore invalid dependencies so as not to
3722 # annoy the user too much (otherwise they'd be
3723 # forced to manually unmerge it first).
3725 show_invalid_depstring_notice(pkg, depstr, atoms)
3727 blocker_atoms = [myatom for myatom in atoms \
3729 blocker_atoms.sort()
3730 counter = long(pkg.metadata["COUNTER"])
3731 blocker_cache[cpv] = \
3732 blocker_cache.BlockerData(counter, blocker_atoms)
3735 for atom in blocker_atoms:
3736 blocker = Blocker(atom=atom,
3737 eapi=pkg.metadata["EAPI"],
3738 priority=self._priority(runtime=True),
3740 self._dynamic_config._blocker_parents.add(blocker, pkg)
3741 except portage.exception.InvalidAtom as e:
3742 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3743 show_invalid_depstring_notice(
3744 pkg, depstr, "Invalid Atom: %s" % (e,))
# Entries never touched above belong to packages no longer installed.
3746 for cpv in stale_cache:
3747 del blocker_cache[cpv]
3748 blocker_cache.flush()
3751 # Discard any "uninstall" tasks scheduled by previous calls
3752 # to this method, since those tasks may not make sense given
3753 # the current graph state.
3754 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
3755 if previous_uninstall_tasks:
3756 self._dynamic_config._blocker_uninstalls = digraph()
3757 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (planned) package sets, expanding virtual provider atoms.
3759 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
3760 self._spinner_update()
3761 root_config = self._frozen_config.roots[blocker.root]
3762 virtuals = root_config.settings.getvirtuals()
3763 myroot = blocker.root
3764 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
3765 final_db = self._dynamic_config.mydbapi[myroot]
3767 provider_virtual = False
3768 if blocker.cp in virtuals and \
3769 not self._have_new_virt(blocker.root, blocker.cp):
3770 provider_virtual = True
3772 # Use this to check PROVIDE for each matched package
3774 atom_set = InternalPackageSet(
3775 initial_atoms=[blocker.atom])
3777 if provider_virtual:
3779 for provider_entry in virtuals[blocker.cp]:
3780 atoms.append(Atom(blocker.atom.replace(
3781 blocker.cp, provider_entry.cp, 1)))
3783 atoms = [blocker.atom]
3785 blocked_initial = set()
3787 for pkg in initial_db.match_pkgs(atom):
3788 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3789 blocked_initial.add(pkg)
3791 blocked_final = set()
3793 for pkg in final_db.match_pkgs(atom):
3794 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3795 blocked_final.add(pkg)
3797 if not blocked_initial and not blocked_final:
3798 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
3799 self._dynamic_config._blocker_parents.remove(blocker)
3800 # Discard any parents that don't have any more blockers.
3801 for pkg in parent_pkgs:
3802 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3803 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3804 self._dynamic_config._blocker_parents.remove(pkg)
3806 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3807 unresolved_blocks = False
3808 depends_on_order = set()
3809 for pkg in blocked_initial:
3810 if pkg.slot_atom == parent.slot_atom and \
3811 not blocker.atom.blocker.overlap.forbid:
3812 # New !!atom blockers do not allow temporary
3813 # simulaneous installation, so unlike !atom
3814 # blockers, !!atom blockers aren't ignored
3815 # when they match other packages occupying
3818 if parent.installed:
3819 # Two currently installed packages conflict with
3820 # eachother. Ignore this case since the damage
3821 # is already done and this would be likely to
3822 # confuse users if displayed like a normal blocker.
3825 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3827 if parent.operation == "merge":
3828 # Maybe the blocked package can be replaced or simply
3829 # unmerged to resolve this block.
3830 depends_on_order.add((pkg, parent))
3832 # None of the above blocker resolutions techniques apply,
3833 # so apparently this one is unresolvable.
3834 unresolved_blocks = True
3835 for pkg in blocked_final:
3836 if pkg.slot_atom == parent.slot_atom and \
3837 not blocker.atom.blocker.overlap.forbid:
3838 # New !!atom blockers do not allow temporary
3839 # simulaneous installation, so unlike !atom
3840 # blockers, !!atom blockers aren't ignored
3841 # when they match other packages occupying
3844 if parent.operation == "nomerge" and \
3845 pkg.operation == "nomerge":
3846 # This blocker will be handled the next time that a
3847 # merge of either package is triggered.
3850 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3852 # Maybe the blocking package can be
3853 # unmerged to resolve this block.
3854 if parent.operation == "merge" and pkg.installed:
3855 depends_on_order.add((pkg, parent))
3857 elif parent.operation == "nomerge":
3858 depends_on_order.add((parent, pkg))
3860 # None of the above blocker resolutions techniques apply,
3861 # so apparently this one is unresolvable.
3862 unresolved_blocks = True
3864 # Make sure we don't unmerge any package that have been pulled
3866 if not unresolved_blocks and depends_on_order:
3867 for inst_pkg, inst_task in depends_on_order:
3868 if self._dynamic_config.digraph.contains(inst_pkg) and \
3869 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3870 unresolved_blocks = True
3873 if not unresolved_blocks and depends_on_order:
3874 for inst_pkg, inst_task in depends_on_order:
3875 uninst_task = Package(built=inst_pkg.built,
3876 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3877 metadata=inst_pkg.metadata,
3878 operation="uninstall",
3879 root_config=inst_pkg.root_config,
3880 type_name=inst_pkg.type_name)
3881 # Enforce correct merge order with a hard dep.
3882 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3883 priority=BlockerDepPriority.instance)
3884 # Count references to this blocker so that it can be
3885 # invalidated after nodes referencing it have been
3887 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3888 if not unresolved_blocks and not depends_on_order:
3889 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3890 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3891 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3892 self._dynamic_config._blocker_parents.remove(blocker)
3893 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3894 self._dynamic_config._blocker_parents.remove(parent)
3895 if unresolved_blocks:
3896 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
3900 def _accept_blocker_conflicts(self):
# Decide whether unresolved blocker conflicts may be tolerated for this
# run: any of the listed options makes conflicts acceptable, since no
# packages will actually be merged/unmerged together on the live system.
# NOTE(review): the accumulator initialization (orig. line 3901) and the
# loop body / return (orig. lines 3905+) are elided from this excerpt.
3902 for x in ("--buildpkgonly", "--fetchonly",
3903 "--fetch-all-uri", "--nodeps"):
3904 if x in self._frozen_config.myopts:
3909 def _merge_order_bias(self, mygraph):
# Docstring text (opening/closing quotes elided in this excerpt):
3911 For optimal leaf node selection, promote deep system runtime deps and
3912 order nodes from highest to lowest overall reference count.
# Precompute each node's parent (reference) count once, so the
# comparator below is O(1) per comparison.
# NOTE(review): the initialization of node_info (orig. line ~3914) is
# elided from this excerpt.
3916 for node in mygraph.order:
3917 node_info[node] = len(mygraph.parent_nodes(node))
3918 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Three-way comparator: uninstall tasks first, then deep system runtime
# deps, then by descending reference count.
3920 def cmp_merge_preference(node1, node2):
3922 if node1.operation == 'uninstall':
3923 if node2.operation == 'uninstall':
# NOTE(review): the return statements for the uninstall and
# system-dep branches (orig. lines 3924-3931, 3935-3938) are
# elided from this excerpt.
3927 if node2.operation == 'uninstall':
3928 if node1.operation == 'uninstall':
3932 node1_sys = node1 in deep_system_deps
3933 node2_sys = node2 in deep_system_deps
3934 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
3939 return node_info[node2] - node_info[node1]
# cmp_sort_key adapts the cmp-style comparator for key-based sort.
3941 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# NOTE(review): the parameter name `reversed` shadows the builtin; it is
# part of the public interface so it cannot be renamed here.
3943 def altlist(self, reversed=False):
# Lazily build and cache the serialized task list, retrying after
# conflict resolution until serialization succeeds.
3945 while self._dynamic_config._serialized_tasks_cache is None:
3946 self._resolve_conflicts()
# NOTE(review): the `try:` line (orig. 3947) and the except-branch body
# plus the final return (orig. lines 3951+, 3954+) are elided here.
3948 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
3949 self._serialize_tasks()
3950 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list.
3953 retlist = self._dynamic_config._serialized_tasks_cache[:]
3958 def schedulerGraph(self):
# Docstring text (quotes elided in this excerpt):
3960 The scheduler graph is identical to the normal one except that
3961 uninstall edges are reversed in specific cases that require
3962 conflicting packages to be temporarily installed simultaneously.
3963 This is intended for use by the Scheduler in its parallelization
3964 logic. It ensures that temporary simultaneous installation of
3965 conflicting packages is avoided when appropriate (especially for
3966 !!atom blockers), but allowed in specific cases that require it.
3968 Note that this method calls break_refs() which alters the state of
3969 internal Package instances such that this depgraph instance should
3970 not be used to perform any more calculations.
3973 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
3974 mergelist = self.altlist()
3975 self.break_refs(mergelist)
3976 self.break_refs(self._dynamic_config._scheduler_graph.order)
3978 # Break DepPriority.satisfied attributes which reference
3979 # installed Package instances.
3980 for parents, children, node in \
3981 self._dynamic_config._scheduler_graph.nodes.values():
3982 for priorities in chain(parents.values(), children.values()):
3983 for priority in priorities:
3984 if priority.satisfied:
# `satisfied` holds a truthy Package reference here; replacing
# it with the plain boolean True drops the reference while
# preserving truthiness for later checks.
3985 priority.satisfied = True
# Prune the package cache down to packages still reachable from the
# graph or installed in a vartree, then break their depgraph refs.
3987 pkg_cache = self._frozen_config._pkg_cache
3988 graph = self._dynamic_config._scheduler_graph
3989 trees = self._frozen_config.trees
3990 pruned_pkg_cache = {}
3991 for pkg in pkg_cache:
3992 if pkg in graph or \
3993 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
3994 pruned_pkg_cache[pkg] = pkg
# NOTE(review): the `for root in trees:` loop header (orig. ~3996) and
# the final return (orig. 4003+) are elided from this excerpt.
3997 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
3998 self.break_refs(trees[root]['vartree'].dbapi)
4000 self.break_refs(pruned_pkg_cache)
4002 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
4006 def break_refs(self, nodes):
# Docstring text (quotes elided in this excerpt):
4008 Take a mergelist like that returned from self.altlist() and
4009 break any references that lead back to the depgraph. This is
4010 useful if you want to hold references to packages without
4011 also holding the depgraph on the heap.
# NOTE(review): the `for node in nodes:` loop header (orig. line ~4013)
# is elided from this excerpt; the body below runs once per node.
4014 if hasattr(node, "root_config"):
4015 # The FakeVartree references the _package_cache which
4016 # references the depgraph. So that Package instances don't
4017 # hold the depgraph and FakeVartree on the heap, replace
4018 # the RootConfig that references the FakeVartree with the
4019 # original RootConfig instance which references the actual
# (continuation of the elided comment, orig. line 4020: "vartree.")
4021 node.root_config = \
4022 self._frozen_config._trees_orig[node.root_config.root]["root_config"]
4024 def _resolve_conflicts(self):
# Complete the graph, validate blockers, then resolve any slot
# collisions; an internal error is raised if either of the first two
# steps fails (blocker-validation failure also suppresses restart).
4025 if not self._complete_graph():
4026 raise self._unknown_internal_error()
4028 if not self._validate_blockers():
4029 self._dynamic_config._skip_restart = True
4030 raise self._unknown_internal_error()
4032 if self._dynamic_config._slot_collision_info:
4033 self._process_slot_conflicts()
4035 def _serialize_tasks(self):
# Serialize the dependency digraph into an ordered merge list
# (retlist) plus a scheduler graph; returns (retlist, scheduler_graph)
# per the final return below. Raises _serialize_tasks_retry when the
# graph must be completed and re-serialized, and
# _unknown_internal_error on circular deps / unresolved blockers.
# NOTE(review): many interior lines are elided from this excerpt (the
# original line-number gutter jumps); the code below is preserved
# verbatim and is NOT contiguous.
4037 if "--debug" in self._frozen_config.myopts:
4038 writemsg("\ndigraph:\n\n", noiselevel=-1)
4039 self._dynamic_config.digraph.debug_print()
4040 writemsg("\n", noiselevel=-1)
4042 scheduler_graph = self._dynamic_config.digraph.copy()
4044 if '--nodeps' in self._frozen_config.myopts:
4045 # Preserve the package order given on the command line.
4046 return ([node for node in scheduler_graph \
4047 if isinstance(node, Package) \
4048 and node.operation == 'merge'], scheduler_graph)
4050 mygraph=self._dynamic_config.digraph.copy()
4052 removed_nodes = set()
4054 # Prune off all DependencyArg instances since they aren't
4055 # needed, and because of nested sets this is faster than doing
4056 # it with multiple digraph.root_nodes() calls below. This also
4057 # takes care of nested sets that have circular references,
4058 # which wouldn't be matched by digraph.root_nodes().
4059 for node in mygraph:
4060 if isinstance(node, DependencyArg):
4061 removed_nodes.add(node)
4063 mygraph.difference_update(removed_nodes)
4064 removed_nodes.clear()
4066 # Prune "nomerge" root nodes if nothing depends on them, since
4067 # otherwise they slow down merge order calculation. Don't remove
4068 # non-root nodes since they help optimize merge order in some cases
4069 # such as revdep-rebuild.
4072 for node in mygraph.root_nodes():
4073 if not isinstance(node, Package) or \
4074 node.installed or node.onlydeps:
4075 removed_nodes.add(node)
4077 self._spinner_update()
4078 mygraph.difference_update(removed_nodes)
4079 if not removed_nodes:
4081 removed_nodes.clear()
4082 self._merge_order_bias(mygraph)
4083 def cmp_circular_bias(n1, n2):
# Docstring text (quotes elided in this excerpt):
4085 RDEPEND is stronger than PDEPEND and this function
4086 measures such a strength bias within a circular
4087 dependency relationship.
4089 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4090 ignore_priority=priority_range.ignore_medium_soft)
4091 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4092 ignore_priority=priority_range.ignore_medium_soft)
4093 if n1_n2_medium == n2_n1_medium:
# NOTE(review): the comparator's return statements (orig. 4094-4097)
# are elided from this excerpt.
4098 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
4100 # Contains uninstall tasks that have been scheduled to
4101 # occur after overlapping blockers have been installed.
4102 scheduled_uninstalls = set()
4103 # Contains any Uninstall tasks that have been ignored
4104 # in order to avoid the circular deps code path. These
4105 # correspond to blocker conflicts that could not be
# (continuation of elided comment, orig. line 4106)
4107 ignored_uninstall_tasks = set()
4108 have_uninstall_task = False
4109 complete = "complete" in self._dynamic_config.myparams
4112 def get_nodes(**kwargs):
# Docstring text (quotes elided in this excerpt):
4114 Returns leaf nodes excluding Uninstall instances
4115 since those should be executed as late as possible.
4117 return [node for node in mygraph.leaf_nodes(**kwargs) \
4118 if isinstance(node, Package) and \
4119 (node.operation != "uninstall" or \
4120 node in scheduled_uninstalls)]
4122 # sys-apps/portage needs special treatment if ROOT="/"
4123 running_root = self._frozen_config._running_root.root
4124 runtime_deps = InternalPackageSet(
4125 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4126 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
4127 PORTAGE_PACKAGE_ATOM)
4128 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
4129 PORTAGE_PACKAGE_ATOM)
4132 running_portage = running_portage[0]
4134 running_portage = None
4136 if replacement_portage:
4137 replacement_portage = replacement_portage[0]
4139 replacement_portage = None
4141 if replacement_portage == running_portage:
4142 replacement_portage = None
4144 if replacement_portage is not None and \
4145 (running_portage is None or \
4146 running_portage.cpv != replacement_portage.cpv or \
4147 '9999' in replacement_portage.cpv or \
4148 'git' in replacement_portage.inherited):
4149 # update from running_portage to replacement_portage asap
4150 asap_nodes.append(replacement_portage)
4152 if running_portage is not None:
# NOTE(review): the `try:` line (orig. 4153) and the except-branch
# filler lines (orig. 4162) are elided from this excerpt.
4154 portage_rdepend = self._select_atoms_highest_available(
4155 running_root, running_portage.metadata["RDEPEND"],
4156 myuse=self._pkg_use_enabled(running_portage),
4157 parent=running_portage, strict=False)
4158 except portage.exception.InvalidDependString as e:
4159 portage.writemsg("!!! Invalid RDEPEND in " + \
4160 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4161 (running_root, running_portage.cpv, e), noiselevel=-1)
4163 portage_rdepend = {running_portage : []}
4164 for atoms in portage_rdepend.values():
4165 runtime_deps.update(atom for atom in atoms \
4166 if not atom.blocker)
4168 # Merge libc asap, in order to account for implicit
4169 # dependencies. See bug #303567.
4170 for root in (running_root,):
4171 libc_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
4172 portage.const.LIBC_PACKAGE_ATOM)
4174 libc_pkg = libc_pkg[0]
4175 if libc_pkg.operation == 'merge':
4176 # Only add a dep when the version changes.
4177 if not libc_pkg.root_config.trees[
4178 'vartree'].dbapi.cpv_exists(libc_pkg.cpv):
4180 # If there's also an os-headers upgrade, we need to
4181 # pull that in first. See bug #328317.
4182 os_headers_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
4183 portage.const.OS_HEADERS_PACKAGE_ATOM)
4185 os_headers_pkg = os_headers_pkg[0]
4186 if os_headers_pkg.operation == 'merge':
4187 # Only add a dep when the version changes.
4188 if not os_headers_pkg.root_config.trees[
4189 'vartree'].dbapi.cpv_exists(os_headers_pkg.cpv):
4190 asap_nodes.append(os_headers_pkg)
4192 asap_nodes.append(libc_pkg)
4194 def gather_deps(ignore_priority, mergeable_nodes,
4195 selected_nodes, node):
# Docstring text (quotes elided in this excerpt):
4197 Recursively gather a group of nodes that RDEPEND on
4198 each other. This ensures that they are merged as a group
4199 and get their RDEPENDs satisfied as soon as possible.
4201 if node in selected_nodes:
# NOTE(review): the return statements of the early-exit branches
# (orig. 4202, 4204, 4210, 4216-4217) are elided from this excerpt.
4203 if node not in mergeable_nodes:
4205 if node == replacement_portage and \
4206 mygraph.child_nodes(node,
4207 ignore_priority=priority_range.ignore_medium_soft):
4208 # Make sure that portage always has all of its
4209 # RDEPENDs installed first.
4211 selected_nodes.add(node)
4212 for child in mygraph.child_nodes(node,
4213 ignore_priority=ignore_priority):
4214 if not gather_deps(ignore_priority,
4215 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat blocker-uninstall edges as
# ignorable (their return lines for the Blocker case are elided).
4219 def ignore_uninst_or_med(priority):
4220 if priority is BlockerDepPriority.instance:
4222 return priority_range.ignore_medium(priority)
4224 def ignore_uninst_or_med_soft(priority):
4225 if priority is BlockerDepPriority.instance:
4227 return priority_range.ignore_medium_soft(priority)
4229 tree_mode = "--tree" in self._frozen_config.myopts
4230 # Tracks whether or not the current iteration should prefer asap_nodes
4231 # if available. This is set to False when the previous iteration
4232 # failed to select any nodes. It is reset whenever nodes are
4233 # successfully selected.
4236 # Controls whether or not the current iteration should drop edges that
4237 # are "satisfied" by installed packages, in order to solve circular
4238 # dependencies. The deep runtime dependencies of installed packages are
4239 # not checked in this case (bug #199856), so it must be avoided
4240 # whenever possible.
4241 drop_satisfied = False
4243 # State of variables for successive iterations that loosen the
4244 # criteria for node selection.
4246 # iteration prefer_asap drop_satisfied
4251 # If no nodes are selected on the last iteration, it is due to
4252 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pick leaf nodes until the work graph
# is empty, progressively loosening the selection criteria.
4254 while not mygraph.empty():
4255 self._spinner_update()
4256 selected_nodes = None
4257 ignore_priority = None
4258 if drop_satisfied or (prefer_asap and asap_nodes):
4259 priority_range = DepPrioritySatisfiedRange
4261 priority_range = DepPriorityNormalRange
4262 if prefer_asap and asap_nodes:
4263 # ASAP nodes are merged before their soft deps. Go ahead and
4264 # select root nodes here if necessary, since it's typical for
4265 # the parent to have been removed from the graph already.
4266 asap_nodes = [node for node in asap_nodes \
4267 if mygraph.contains(node)]
4268 for node in asap_nodes:
4269 if not mygraph.child_nodes(node,
4270 ignore_priority=priority_range.ignore_soft):
4271 selected_nodes = [node]
4272 asap_nodes.remove(node)
4274 if not selected_nodes and \
4275 not (prefer_asap and asap_nodes):
4276 for i in range(priority_range.NONE,
4277 priority_range.MEDIUM_SOFT + 1):
4278 ignore_priority = priority_range.ignore_priority[i]
4279 nodes = get_nodes(ignore_priority=ignore_priority)
4281 # If there is a mixture of merges and uninstalls,
4282 # do the uninstalls first.
4284 good_uninstalls = []
4286 if node.operation == "uninstall":
4287 good_uninstalls.append(node)
4290 nodes = good_uninstalls
4294 if ignore_priority is None and not tree_mode:
4295 # Greedily pop all of these nodes since no
4296 # relationship has been ignored. This optimization
4297 # destroys --tree output, so it's disabled in tree
# (continuation of elided comment, orig. line 4298: "mode.")
4299 selected_nodes = nodes
4301 # For optimal merge order:
4302 # * Only pop one node.
4303 # * Removing a root node (node without a parent)
4304 # will not produce a leaf node, so avoid it.
4305 # * It's normal for a selected uninstall to be a
4306 # root node, so don't check them for parents.
4308 if node.operation == "uninstall" or \
4309 mygraph.parent_nodes(node):
4310 selected_nodes = [node]
4316 if not selected_nodes:
4317 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4319 mergeable_nodes = set(nodes)
4320 if prefer_asap and asap_nodes:
4322 for i in range(priority_range.SOFT,
4323 priority_range.MEDIUM_SOFT + 1):
4324 ignore_priority = priority_range.ignore_priority[i]
4326 if not mygraph.parent_nodes(node):
4328 selected_nodes = set()
4329 if gather_deps(ignore_priority,
4330 mergeable_nodes, selected_nodes, node):
4333 selected_nodes = None
4337 if prefer_asap and asap_nodes and not selected_nodes:
4338 # We failed to find any asap nodes to merge, so ignore
4339 # them for the next iteration.
4343 if selected_nodes and ignore_priority is not None:
4344 # Try to merge ignored medium_soft deps as soon as possible
4345 # if they're not satisfied by installed packages.
4346 for node in selected_nodes:
4347 children = set(mygraph.child_nodes(node))
4348 soft = children.difference(
4349 mygraph.child_nodes(node,
4350 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4351 medium_soft = children.difference(
4352 mygraph.child_nodes(node,
4354 DepPrioritySatisfiedRange.ignore_medium_soft))
4355 medium_soft.difference_update(soft)
4356 for child in medium_soft:
4357 if child in selected_nodes:
4359 if child in asap_nodes:
4361 asap_nodes.append(child)
4363 if selected_nodes and len(selected_nodes) > 1:
4364 if not isinstance(selected_nodes, list):
4365 selected_nodes = list(selected_nodes)
4366 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
4368 if not selected_nodes and not myblocker_uninstalls.is_empty():
4369 # An Uninstall task needs to be executed in order to
4370 # avoid conflict if possible.
4373 priority_range = DepPrioritySatisfiedRange
4375 priority_range = DepPriorityNormalRange
4377 mergeable_nodes = get_nodes(
4378 ignore_priority=ignore_uninst_or_med)
4380 min_parent_deps = None
4383 for task in myblocker_uninstalls.leaf_nodes():
4384 # Do some sanity checks so that system or world packages
4385 # don't get uninstalled inappropriately here (only really
4386 # necessary when --complete-graph has not been enabled).
4388 if task in ignored_uninstall_tasks:
4391 if task in scheduled_uninstalls:
4392 # It's been scheduled but it hasn't
4393 # been executed yet due to dependence
4394 # on installation of blocking packages.
4397 root_config = self._frozen_config.roots[task.root]
4398 inst_pkg = self._pkg(task.cpv, "installed", root_config,
# NOTE(review): the keyword continuation of the _pkg() call
# (orig. 4399-4400) is elided from this excerpt.
4401 if self._dynamic_config.digraph.contains(inst_pkg):
4404 forbid_overlap = False
4405 heuristic_overlap = False
4406 for blocker in myblocker_uninstalls.parent_nodes(task):
4407 if not eapi_has_strong_blocks(blocker.eapi):
4408 heuristic_overlap = True
4409 elif blocker.atom.blocker.overlap.forbid:
4410 forbid_overlap = True
4412 if forbid_overlap and running_root == task.root:
4415 if heuristic_overlap and running_root == task.root:
4416 # Never uninstall sys-apps/portage or its essential
4417 # dependencies, except through replacement.
4419 runtime_dep_atoms = \
4420 list(runtime_deps.iterAtomsForPackage(task))
4421 except portage.exception.InvalidDependString as e:
4422 portage.writemsg("!!! Invalid PROVIDE in " + \
4423 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4424 (task.root, task.cpv, e), noiselevel=-1)
4428 # Don't uninstall a runtime dep if it appears
4429 # to be the only suitable one installed.
4431 vardb = root_config.trees["vartree"].dbapi
4432 for atom in runtime_dep_atoms:
4433 other_version = None
4434 for pkg in vardb.match_pkgs(atom):
4435 if pkg.cpv == task.cpv and \
4436 pkg.metadata["COUNTER"] == \
4437 task.metadata["COUNTER"]:
4441 if other_version is None:
4447 # For packages in the system set, don't take
4448 # any chances. If the conflict can't be resolved
4449 # by a normal replacement operation then abort.
4452 for atom in root_config.sets[
4453 "system"].iterAtomsForPackage(task):
4456 except portage.exception.InvalidDependString as e:
4457 portage.writemsg("!!! Invalid PROVIDE in " + \
4458 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4459 (task.root, task.cpv, e), noiselevel=-1)
4465 # Note that the world check isn't always
4466 # necessary since self._complete_graph() will
4467 # add all packages from the system and world sets to the
4468 # graph. This just allows unresolved conflicts to be
4469 # detected as early as possible, which makes it possible
4470 # to avoid calling self._complete_graph() when it is
4471 # unnecessary due to blockers triggering an abortion.
4473 # For packages in the world set, go ahead and uninstall
4474 # when necessary, as long as the atom will be satisfied
4475 # in the final state.
4476 graph_db = self._dynamic_config.mydbapi[task.root]
4479 for atom in root_config.sets[
4480 "selected"].iterAtomsForPackage(task):
4482 for pkg in graph_db.match_pkgs(atom):
4489 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
4491 except portage.exception.InvalidDependString as e:
4492 portage.writemsg("!!! Invalid PROVIDE in " + \
4493 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4494 (task.root, task.cpv, e), noiselevel=-1)
4500 # Check the deps of parent nodes to ensure that
4501 # the chosen task produces a leaf node. Maybe
4502 # this can be optimized some more to make the
4503 # best possible choice, but the current algorithm
4504 # is simple and should be near optimal for most
# (continuation of elided comment, orig. line 4505: "common cases.")
4506 self._spinner_update()
4507 mergeable_parent = False
4509 parent_deps.add(task)
4510 for parent in mygraph.parent_nodes(task):
4511 parent_deps.update(mygraph.child_nodes(parent,
4512 ignore_priority=priority_range.ignore_medium_soft))
4513 if min_parent_deps is not None and \
4514 len(parent_deps) >= min_parent_deps:
4515 # This task is no better than a previously selected
4516 # task, so abort search now in order to avoid wasting
4517 # any more cpu time on this task. This increases
4518 # performance dramatically in cases when there are
4519 # hundreds of blockers to solve, like when
4520 # upgrading to a new slot of kde-meta.
4521 mergeable_parent = None
4523 if parent in mergeable_nodes and \
4524 gather_deps(ignore_uninst_or_med_soft,
4525 mergeable_nodes, set(), parent):
4526 mergeable_parent = True
4528 if not mergeable_parent:
4531 if min_parent_deps is None or \
4532 len(parent_deps) < min_parent_deps:
4533 min_parent_deps = len(parent_deps)
4536 if uninst_task is not None and min_parent_deps == 1:
4537 # This is the best possible result, so abort search
4538 # now in order to avoid wasting any more cpu time.
4541 if uninst_task is not None:
4542 # The uninstall is performed only after blocking
4543 # packages have been merged on top of it. File
4544 # collisions between blocking packages are detected
4545 # and removed from the list of files to be uninstalled.
4546 scheduled_uninstalls.add(uninst_task)
4547 parent_nodes = mygraph.parent_nodes(uninst_task)
4549 # Reverse the parent -> uninstall edges since we want
4550 # to do the uninstall after blocking packages have
4551 # been merged on top of it.
4552 mygraph.remove(uninst_task)
4553 for blocked_pkg in parent_nodes:
4554 mygraph.add(blocked_pkg, uninst_task,
4555 priority=BlockerDepPriority.instance)
4556 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
4557 scheduler_graph.add(blocked_pkg, uninst_task,
4558 priority=BlockerDepPriority.instance)
4560 # Sometimes a merge node will render an uninstall
4561 # node unnecessary (due to occupying the same SLOT),
4562 # and we want to avoid executing a separate uninstall
4563 # task in that case.
4564 slot_node = self._dynamic_config.mydbapi[uninst_task.root
4565 ].match_pkgs(uninst_task.slot_atom)
# NOTE(review): the `if slot_node and` line (orig. 4566) is
# elided from this excerpt.
4567 slot_node[0].operation == "merge":
4568 mygraph.add(slot_node[0], uninst_task,
4569 priority=BlockerDepPriority.instance)
4571 # Reset the state variables for leaf node selection and
4572 # continue trying to select leaf nodes.
4574 drop_satisfied = False
4577 if not selected_nodes:
4578 # Only select root nodes as a last resort. This case should
4579 # only trigger when the graph is nearly empty and the only
4580 # remaining nodes are isolated (no parents or children). Since
4581 # the nodes must be isolated, ignore_priority is not needed.
4582 selected_nodes = get_nodes()
4584 if not selected_nodes and not drop_satisfied:
4585 drop_satisfied = True
4588 if not selected_nodes and not myblocker_uninstalls.is_empty():
4589 # If possible, drop an uninstall task here in order to avoid
4590 # the circular deps code path. The corresponding blocker will
4591 # still be counted as an unresolved conflict.
4593 for node in myblocker_uninstalls.leaf_nodes():
4595 mygraph.remove(node)
4600 ignored_uninstall_tasks.add(node)
4603 if uninst_task is not None:
4604 # Reset the state variables for leaf node selection and
4605 # continue trying to select leaf nodes.
4607 drop_satisfied = False
4610 if not selected_nodes:
4611 self._dynamic_config._circular_deps_for_display = mygraph
4612 self._dynamic_config._skip_restart = True
4613 raise self._unknown_internal_error()
4615 # At this point, we've succeeded in selecting one or more nodes, so
4616 # reset state variables for leaf node selection.
4618 drop_satisfied = False
4620 mygraph.difference_update(selected_nodes)
4622 for node in selected_nodes:
4623 if isinstance(node, Package) and \
4624 node.operation == "nomerge":
4627 # Handle interactions between blockers
4628 # and uninstallation tasks.
4629 solved_blockers = set()
4631 if isinstance(node, Package) and \
4632 "uninstall" == node.operation:
4633 have_uninstall_task = True
4636 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
4637 inst_pkg = vardb.match_pkgs(node.slot_atom)
4639 # The package will be replaced by this one, so remove
4640 # the corresponding Uninstall task if necessary.
4641 inst_pkg = inst_pkg[0]
4642 uninst_task = Package(built=inst_pkg.built,
4643 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4644 metadata=inst_pkg.metadata,
4645 operation="uninstall",
4646 root_config=inst_pkg.root_config,
4647 type_name=inst_pkg.type_name)
4649 mygraph.remove(uninst_task)
4653 if uninst_task is not None and \
4654 uninst_task not in ignored_uninstall_tasks and \
4655 myblocker_uninstalls.contains(uninst_task):
4656 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4657 myblocker_uninstalls.remove(uninst_task)
4658 # Discard any blockers that this Uninstall solves.
4659 for blocker in blocker_nodes:
4660 if not myblocker_uninstalls.child_nodes(blocker):
4661 myblocker_uninstalls.remove(blocker)
# NOTE(review): the `if blocker not in` line (orig. 4662) is
# elided from this excerpt.
4663 self._dynamic_config._unsolvable_blockers:
4664 solved_blockers.add(blocker)
4666 retlist.append(node)
4668 if (isinstance(node, Package) and \
4669 "uninstall" == node.operation) or \
4670 (uninst_task is not None and \
4671 uninst_task in scheduled_uninstalls):
4672 # Include satisfied blockers in the merge list
4673 # since the user might be interested and also
4674 # it serves as an indicator that blocking packages
4675 # will be temporarily installed simultaneously.
4676 for blocker in solved_blockers:
4677 retlist.append(blocker)
4679 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
4680 for node in myblocker_uninstalls.root_nodes():
4681 unsolvable_blockers.add(node)
4683 # If any Uninstall tasks need to be executed in order
4684 # to avoid a conflict, complete the graph with any
4685 # dependencies that may have been initially
4686 # neglected (to ensure that unsafe Uninstall tasks
4687 # are properly identified and blocked from execution).
4688 if have_uninstall_task and \
4690 not unsolvable_blockers:
4691 self._dynamic_config.myparams["complete"] = True
4692 if '--debug' in self._frozen_config.myopts:
4694 msg.append("enabling 'complete' depgraph mode " + \
4695 "due to uninstall task(s):")
4697 for node in retlist:
4698 if isinstance(node, Package) and \
4699 node.operation == 'uninstall':
4700 msg.append("\t%s" % (node,))
4701 writemsg_level("\n%s\n" % \
4702 "".join("%s\n" % line for line in msg),
4703 level=logging.DEBUG, noiselevel=-1)
4704 raise self._serialize_tasks_retry("")
4706 # Set satisfied state on blockers, but not before the
4707 # above retry path, since we don't want to modify the
4708 # state in that case.
4709 for node in retlist:
4710 if isinstance(node, Blocker):
4711 node.satisfied = True
4713 for blocker in unsolvable_blockers:
4714 retlist.append(blocker)
4716 if unsolvable_blockers and \
4717 not self._accept_blocker_conflicts():
4718 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
4719 self._dynamic_config._serialized_tasks_cache = retlist[:]
4720 self._dynamic_config._scheduler_graph = scheduler_graph
4721 self._dynamic_config._skip_restart = True
4722 raise self._unknown_internal_error()
4724 if self._dynamic_config._slot_collision_info and \
4725 not self._accept_blocker_conflicts():
4726 self._dynamic_config._serialized_tasks_cache = retlist[:]
4727 self._dynamic_config._scheduler_graph = scheduler_graph
4728 raise self._unknown_internal_error()
4730 return retlist, scheduler_graph
4732 def _show_circular_deps(self, mygraph):
# Report a circular-dependency failure: force verbose/tree output,
# display the partial merge list, then print the cycle details and any
# USE-flag change suggestions computed by circular_dependency_handler.
# NOTE(review): a few lines are elided from this excerpt (e.g. the
# noiselevel argument of the writemsg call at orig. 4745).
4733 self._dynamic_config._circular_dependency_handler = \
4734 circular_dependency_handler(self, mygraph)
4735 handler = self._dynamic_config._circular_dependency_handler
4737 self._frozen_config.myopts.pop("--quiet", None)
4738 self._frozen_config.myopts["--verbose"] = True
4739 self._frozen_config.myopts["--tree"] = True
4740 portage.writemsg("\n\n", noiselevel=-1)
4741 self.display(handler.merge_list)
4742 prefix = colorize("BAD", " * ")
4743 portage.writemsg("\n", noiselevel=-1)
4744 portage.writemsg(prefix + "Error: circular dependencies:\n",
4746 portage.writemsg("\n", noiselevel=-1)
4748 if handler.circular_dep_message is None or \
4749 "--debug" in self._frozen_config.myopts:
4750 handler.debug_print()
4751 portage.writemsg("\n", noiselevel=-1)
4753 if handler.circular_dep_message is not None:
4754 portage.writemsg(handler.circular_dep_message, noiselevel=-1)
4756 suggestions = handler.suggestions
# NOTE(review): the `if suggestions:` guard (orig. ~4757) and the
# else-branch keyword (orig. 4761/4771) are elided from this excerpt.
4758 writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
4759 if len(suggestions) == 1:
4760 writemsg("by applying the following change:\n", noiselevel=-1)
4762 writemsg("by applying " + colorize("bold", "any of") + \
4763 " the following changes:\n", noiselevel=-1)
4764 writemsg("".join(suggestions), noiselevel=-1)
4765 writemsg("\nNote that this change can be reverted, once the package has" + \
4766 " been installed.\n", noiselevel=-1)
4767 if handler.large_cycle_count:
4768 writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
4769 "Several changes might be required to resolve all cycles.\n" + \
4770 "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
4772 writemsg("\n\n", noiselevel=-1)
4773 writemsg(prefix + "Note that circular dependencies " + \
4774 "can often be avoided by temporarily\n", noiselevel=-1)
4775 writemsg(prefix + "disabling USE flags that trigger " + \
4776 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Re-display the cached serialized task list, unless an identical
	list (or its exact reverse) has already been shown via display().
	"""
	dynamic_config = self._dynamic_config
	tasks = dynamic_config._serialized_tasks_cache
	if tasks is None:
		return
	already_shown = dynamic_config._displayed_list
	if already_shown and (
		already_shown == tasks or
		already_shown == list(reversed(tasks))):
		# This exact merge list was displayed before; don't repeat it.
		return
	display_list = tasks[:]
	if "--tree" in self._frozen_config.myopts:
		# --tree output is rendered bottom-up.
		display_list.reverse()
	self.display(display_list)
4789 def _show_unsatisfied_blockers(self, blockers):
# Report blockers that could not be resolved: show the merge list, a
# wrapped error banner, then each conflicting package together with a
# bounded list of the parents/atoms that pulled it in.
# NOTE(review): several lines are elided from this excerpt, including
# the initializations of conflict_pkgs, pruned_pkgs, msg, indent and
# max_parents, plus various break/continue statements.
4790 self._show_merge_list()
4791 msg = "Error: The above package list contains " + \
4792 "packages which cannot be installed " + \
4793 "at the same time on the same system."
4794 prefix = colorize("BAD", " * ")
4795 portage.writemsg("\n", noiselevel=-1)
4796 for line in textwrap.wrap(msg, 70):
4797 portage.writemsg(prefix + line + "\n", noiselevel=-1)
4799 # Display the conflicting packages along with the packages
4800 # that pulled them in. This is helpful for troubleshooting
4801 # cases in which blockers don't solve automatically and
4802 # the reasons are not apparent from the normal merge list
# (continuation of elided comment, orig. line 4803: "display.")
4806 for blocker in blockers:
4807 for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
4808 self._dynamic_config._blocker_parents.parent_nodes(blocker)):
4809 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
4810 if not parent_atoms:
4811 atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
4812 if atom is not None:
4813 parent_atoms = set([("@selected", atom)])
4815 conflict_pkgs[pkg] = parent_atoms
4818 # Reduce noise by pruning packages that are only
4819 # pulled in by other conflict packages.
4821 for pkg, parent_atoms in conflict_pkgs.items():
4822 relevant_parent = False
4823 for parent, atom in parent_atoms:
4824 if parent not in conflict_pkgs:
4825 relevant_parent = True
4827 if not relevant_parent:
4828 pruned_pkgs.add(pkg)
4829 for pkg in pruned_pkgs:
4830 del conflict_pkgs[pkg]
4836 # Max number of parents shown, to avoid flooding the display.
4838 for pkg, parent_atoms in conflict_pkgs.items():
4842 # Prefer packages that are not directly involved in a conflict.
4843 for parent_atom in parent_atoms:
4844 if len(pruned_list) >= max_parents:
4846 parent, atom = parent_atom
4847 if parent not in conflict_pkgs:
4848 pruned_list.add(parent_atom)
# Second pass fills any remaining display slots with parents that
# ARE involved in a conflict.
4850 for parent_atom in parent_atoms:
4851 if len(pruned_list) >= max_parents:
4853 pruned_list.add(parent_atom)
4855 omitted_parents = len(parent_atoms) - len(pruned_list)
4856 msg.append(indent + "%s pulled in by\n" % pkg)
4858 for parent_atom in pruned_list:
4859 parent, atom = parent_atom
4860 msg.append(2*indent)
4861 if isinstance(parent,
4862 (PackageArg, AtomArg)):
4863 # For PackageArg and AtomArg types, it's
4864 # redundant to display the atom attribute.
4865 msg.append(str(parent))
4867 # Display the specific atom from SetArg or
# (continuation of elided comment, orig. line 4868: "Package types.")
4869 msg.append("%s required by %s" % (atom, parent))
4873 msg.append(2*indent)
4874 msg.append("(and %d more)\n" % omitted_parents)
4878 sys.stderr.write("".join(msg))
4881 if "--quiet" not in self._frozen_config.myopts:
4882 show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
    """
    Display the merge list represented by mylist.

    @param mylist: the merge list to display
    @param favorites: optional list of favorite atoms; defaults to an
        empty list. A mutable default argument ([]) was replaced with
        None so the same list object is not shared across calls.
    @param verbosity: optional verbosity level, passed through
    @returns: whatever the module-level display() function returns
    """
    if favorites is None:
        favorites = []
    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist
    # Delegate to the module-level display() helper (resolver output).
    return display(self, mylist, favorites, verbosity)
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """
    # Note that show_masked_packages() sends it's output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    # NOTE(review): sys.stdout is redirected here but never restored
    # in this view -- the original presumably wrapped this in
    # try/finally; confirm against upstream portage.
    sys.stdout = sys.stderr
    self._display_problems()
    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
    """
    Emit the accumulated problem reports: circular deps, unsatisfied
    blockers or slot collisions, missed updates, needed keyword/USE/
    license changes, world-file problems, package.provided warnings
    and masked packages.

    NOTE(review): several guard and initialization lines appear to be
    elided from this block (e.g. `license_msg = []`, `arg_refs = {}`,
    `adjustments = []`, the `if world_problems:` guard and the
    `noiselevel=-1)` terminators of two writemsg() calls) -- confirm
    against upstream portage.
    """
    if self._dynamic_config._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._dynamic_config._circular_deps_for_display)

    # The user is only notified of a slot conflict if
    # there are no unresolvable blocker conflicts.
    if self._dynamic_config._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._dynamic_config._unsatisfied_blockers_for_display)
    elif self._dynamic_config._slot_collision_info:
        self._show_slot_collision_notice()
    self._show_missed_update()

    # Collect "# keyword change" comment lines for packages that need
    # unstable keywords accepted.
    unstable_keyword_msg = []
    for pkg in self._dynamic_config._needed_unstable_keywords:
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            pkgsettings = self._frozen_config.pkgsettings[pkg.root]
            mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
                use=self._pkg_use_enabled(pkg))
            for reason in mreasons:
                if reason.unmask_hint and \
                    reason.unmask_hint.key == 'unstable keyword':
                    keyword = reason.unmask_hint.value
                    unstable_keyword_msg.append(self._get_dep_chain_as_comment(pkg))
                    unstable_keyword_msg.append("=%s %s\n" % (pkg.cpv, keyword))

    # Collect needed USE flag changes per package.
    use_changes_msg = []
    for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            changes = needed_use_config_change[1]
            # NOTE(review): the if/else on `state` selecting between
            # "+flag" and "-flag" appears to be elided here.
            for flag, state in changes.items():
                adjustments.append(flag)
                adjustments.append("-" + flag)
            use_changes_msg.append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
            use_changes_msg.append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))

    # Collect needed license changes per package.
    for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            license_msg.append(self._get_dep_chain_as_comment(pkg))
            license_msg.append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))

    if unstable_keyword_msg:
        writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
            " are necessary to proceed:\n", noiselevel=-1)
        writemsg_stdout("".join(unstable_keyword_msg), noiselevel=-1)
    # NOTE(review): `if use_changes_msg:` / `if license_msg:` guards
    # appear to be elided before the next two sections.
    writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
        " are necessary to proceed:\n", noiselevel=-1)
    writemsg_stdout("".join(use_changes_msg), noiselevel=-1)
    writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
        " are necessary to proceed:\n", noiselevel=-1)
    writemsg_stdout("".join(license_msg), noiselevel=-1)

    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.
    if self._dynamic_config._missing_args:
        world_problems = False
        if "world" in self._dynamic_config.sets[
            self._frozen_config.target_root].sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
            for arg, atom in self._dynamic_config._missing_args:
                if arg.name in ("selected", "world") and atom in world_set:
                    world_problems = True
    # NOTE(review): an `if world_problems:` guard appears to be elided
    # before these two writes.
    sys.stderr.write("\n!!! Problems have been " + \
        "detected with your world file\n")
    sys.stderr.write("!!! Please run " + \
        green("emaint --check world")+"\n\n")

    if self._dynamic_config._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._dynamic_config._missing_args) + "\n")

    if self._dynamic_config._pprovided_args:
        for arg, atom in self._dynamic_config._pprovided_args:
            if isinstance(arg, SetArg):
                arg_atom = (atom, atom)
            arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:
                # NOTE(review): refs.append(parent) appears elided.
                msg.append(bad("\nWARNING: "))
        if len(self._dynamic_config._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
        msg.append("A requested package will not be " + \
            "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.items():
            problems_sets.update(refs)
            ref_string = ", ".join(["'%s'" % name for name in refs])
            ref_string = " pulled in by " + ref_string
            msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
        if "selected" in problems_sets or "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append(" B) Uninstall offending packages (cleans them from world).\n")
            msg.append(" C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))

    # Warn about masked LICENSE updates (output to stderr via the
    # redirect performed by display_problems()).
    masked_packages = []
    for pkg in self._dynamic_config._masked_license_updates:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.metadata, mreasons))
    # NOTE(review): the closing `noiselevel=-1)` of this writemsg call
    # appears to be elided -- the call is unterminated as written.
    writemsg("\n" + colorize("BAD", "!!!") + \
        " The following updates are masked by LICENSE changes:\n",
    show_masked_packages(masked_packages)
    writemsg("\n", noiselevel=-1)

    # Warn about installed packages that are now masked.
    masked_packages = []
    for pkg in self._dynamic_config._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.metadata, mreasons))
    # NOTE(review): same unterminated writemsg call as above.
    writemsg("\n" + colorize("BAD", "!!!") + \
        " The following installed packages are masked:\n",
    show_masked_packages(masked_packages)
    writemsg("\n", noiselevel=-1)
def saveNomergeFavorites(self):
    """Find atoms in favorites that are not in the mergelist and add them
    to the world file if necessary.

    NOTE(review): multiple lines appear to be elided from this block
    (early `return` for pretend-like options, world_set.lock()/unlock(),
    the `try:` that matches the InvalidDependString handler, `continue`
    statements, and the initialization of all_added) -- confirm against
    upstream portage.
    """
    # Never modify the world file for dry-run style operations.
    for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
        "--oneshot", "--onlydeps", "--pretend"):
        if x in self._frozen_config.myopts:
            # NOTE(review): a `return` appears to be elided here.
            root_config = self._frozen_config.roots[self._frozen_config.target_root]
    world_set = root_config.sets["selected"]

    world_locked = False
    if hasattr(world_set, "lock"):
        # NOTE(review): world_set.lock() / world_locked = True elided.
        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk
    args_set = self._dynamic_config.sets[
        self._frozen_config.target_root].sets['__non_set_args__']
    portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
    added_favorites = set()
    for x in self._dynamic_config._set_nodes:
        if x.operation != "nomerge":
            if x.root != root_config.root:
                myfavkey = create_world_atom(x, args_set, root_config)
                if myfavkey in added_favorites:
                    added_favorites.add(myfavkey)
        # NOTE(review): the matching `try:` for this handler is elided.
        except portage.exception.InvalidDependString as e:
            writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                (x.cpv, e), noiselevel=-1)
            writemsg("!!! see '%s'\n\n" % os.path.join(
                x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
    # Also record nested sets given as arguments, when they qualify as
    # world candidates.
    for arg in self._dynamic_config._initial_arg_list:
        if not isinstance(arg, SetArg):
            if arg.root_config.root != root_config.root:
                if k in ("selected", "world") or \
                    not root_config.sets[k].world_candidate:
                    all_added.append(SETPREFIX + k)
    all_added.extend(added_favorites)
    writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
        colorize("INFORM", str(a)), noiselevel=-1)
    world_set.update(all_added)
def _loadResumeCommand(self, resume_data, skip_masked=True,
    """
    Add a resume command to the graph and validate it in the process. This
    will raise a PackageNotFound exception if a package is not available.

    NOTE(review): the def line above is truncated (the remaining
    parameter(s) and closing parenthesis are elided), as are many
    `continue`/`return`/`try:` lines throughout this block and the
    initialization of masked_tasks -- confirm against upstream portage.
    """
    if not isinstance(resume_data, dict):
    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
    fakedb = self._dynamic_config.mydbapi
    trees = self._frozen_config.trees
    serialized_tasks = []
    # NOTE(review): the `for x in mergelist:` loop header appears to
    # be elided here.
        if not (isinstance(x, list) and len(x) == 4):
        pkg_type, myroot, pkg_key, action = x
        if pkg_type not in self.pkg_tree_map:
        if action != "merge":
        root_config = self._frozen_config.roots[myroot]
            pkg = self._pkg(pkg_key, pkg_type, root_config)
        except portage.exception.PackageNotFound:
            # It does not exist or it is corrupt.
            # TODO: log these somewhere
        if "merge" == pkg.operation and \
            self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
            modified_use=self._pkg_use_enabled(pkg)):
        if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
            masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
            self._dynamic_config._unsatisfied_deps_for_display.append(
                ((pkg.root, "="+pkg.cpv), {"myparent":None}))
        fakedb[myroot].cpv_inject(pkg)
        serialized_tasks.append(pkg)
        self._spinner_update()

    if self._dynamic_config._unsatisfied_deps_for_display:
    if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
        self._dynamic_config._serialized_tasks_cache = serialized_tasks
        self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
        self._select_package = self._select_pkg_from_graph
        self._dynamic_config.myparams["selective"] = True
        # Always traverse deep dependencies in order to account for
        # potentially unsatisfied dependencies of installed packages.
        # This is necessary for correct --keep-going or --resume operation
        # in case a package from a group of circularly dependent packages
        # fails. In this case, a package which has recently been installed
        # may have an unsatisfied circular dependency (pulled in by
        # PDEPEND, for example). So, even though a package is already
        # installed, it may not have all of it's dependencies satisfied, so
        # it may not be usable. If such a package is in the subgraph of
        # deep depenedencies of a scheduled build, that build needs to
        # be cancelled. In order for this type of situation to be
        # recognized, deep traversal of dependencies is required.
        self._dynamic_config.myparams["deep"] = True

        favorites = resume_data.get("favorites")
        args_set = self._dynamic_config.sets[
            self._frozen_config.target_root].sets['__non_set_args__']
        if isinstance(favorites, list):
            args = self._load_favorites(favorites)
        for task in serialized_tasks:
            if isinstance(task, Package) and \
                task.operation == "merge":
                if not self._add_pkg(task, None):
        # Packages for argument atoms need to be explicitly
        # added via _add_pkg() so that they are included in the
        # digraph (needed at least for --tree display).
        for arg in self._expand_set_args(args, add_to_digraph=True):
            for atom in arg.pset.getAtoms():
                pkg, existing_node = self._select_package(
                    arg.root_config.root, atom)
                if existing_node is None and \
                    if not self._add_pkg(pkg, Dependency(atom=atom,
                        root=pkg.root, parent=arg)):
        # Allow unsatisfied deps here to avoid showing a masking
        # message for an unsatisfied dep that isn't necessarily
        # masked.
        if not self._create_graph(allow_unsatisfied=True):
        unsatisfied_deps = []
        for dep in self._dynamic_config._unsatisfied_deps:
            if not isinstance(dep.parent, Package):
            if dep.parent.operation == "merge":
                unsatisfied_deps.append(dep)
            # For unsatisfied deps of installed packages, only account for
            # them if they are in the subgraph of dependencies of a package
            # which is scheduled to be installed.
            unsatisfied_install = False
            # NOTE(review): `traversed = set()` and the `while dep_stack:`
            # loop header appear to be elided below.
            dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
                node = dep_stack.pop()
                if not isinstance(node, Package):
                if node.operation == "merge":
                    unsatisfied_install = True
                if node in traversed:
                dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
            if unsatisfied_install:
                unsatisfied_deps.append(dep)

        if masked_tasks or unsatisfied_deps:
            # This probably means that a required package
            # was dropped via --skipfirst. It makes the
            # resume list invalid, so convert it to a
            # UnsatisfiedResumeDep exception.
            raise self.UnsatisfiedResumeDep(self,
                masked_tasks + unsatisfied_deps)
        self._dynamic_config._serialized_tasks_cache = None
    # NOTE(review): the matching `try:` for this handler is elided.
    except self._unknown_internal_error:
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.

    NOTE(review): the `args = []` initialization, the
    `for x in favorites:` loop header, several `continue` lines, the
    `try:` matching the InvalidAtom handler and the final
    `return args` appear to be elided -- confirm against upstream.
    """
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
        if not isinstance(x, basestring):
        if x in ("system", "world"):
        if x.startswith(SETPREFIX):
            s = x[len(SETPREFIX):]
            if s in depgraph_sets.sets:
                depgraph_sets.sets[s] = pset
            args.append(SetArg(arg=x, pset=pset,
                root_config=root_config))
    except portage.exception.InvalidAtom:
        args.append(AtomArg(arg=x, atom=x,
            root_config=root_config))
    self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        portage.exception.PortageException.__init__(self, value)
        # Keep a reference to the depgraph so callers can inspect the
        # graph that produced the unsatisfied dependencies.
        self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
    """Base class for exceptions used internally by the depgraph."""
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr, unfortunately, the exact reason for the failure
    may not be known here.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
class _backtrack_mask(_internal_exception):
    """
    This is raised by _show_unsatisfied_dep() when it's called with
    check_backtrack=True and a matching package has been masked by
    a backtracking mask (exact wording of the original docstring
    tail is elided in this view -- confirm against upstream).
    """
def need_restart(self):
    """Return True when a restart has been requested and restarting
    has not been explicitly suppressed."""
    cfg = self._dynamic_config
    return cfg._need_restart and not cfg._skip_restart
def get_backtrack_parameters(self):
    # Returns copies of the dynamic-config state needed to seed a new
    # depgraph when backtracking.
    # NOTE(review): the `return {` opener, the "runtime_pkg_mask" key
    # line and the closing `}` appear to be elided from this view --
    # confirm against upstream portage.
        "needed_unstable_keywords":
            self._dynamic_config._needed_unstable_keywords.copy(), \
            self._dynamic_config._runtime_pkg_mask.copy(),
        "needed_use_config_changes":
            self._dynamic_config._needed_use_config_changes.copy(),
        "needed_license_changes":
            self._dynamic_config._needed_license_changes.copy(),
class _dep_check_composite_db(dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    via dep_check().
    """
    def __init__(self, depgraph, root):
        dbapi.__init__(self)
        self._depgraph = depgraph
        # NOTE(review): `self._root = root` appears to be elided here,
        # yet self._root is read throughout this class -- confirm.
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Drop both caches; called when graph state changes.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def match(self, atom):
        # Memoized package matching built on depgraph._select_package().
        # NOTE(review): initializations of `ret`, `orig_atom` and
        # `slots`, plus several early returns, appear to be elided.
        ret = self._match_cache.get(atom)
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                        # NOTE(review): a `continue` appears elided.
                        slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
            slots.remove(pkg.metadata["SLOT"])
            # NOTE(review): the `while slots:` loop header appears
            # to be elided here.
            slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
            pkg, existing = self._depgraph._select_package(
                self._root, slot_atom)
            if not self._visible(pkg):
                self._cpv_pkg_map[pkg.cpv] = pkg
        self._cpv_sort_ascending(ret)
        self._match_cache[orig_atom] = ret

    def _visible(self, pkg):
        # Decide whether pkg should be visible through this composite
        # view. NOTE(review): numerous `return` / `try:` / `continue`
        # lines appear to be elided throughout this method.
        if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
            arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
        except (StopIteration, portage.exception.InvalidDependString):
        if pkg.installed and \
            (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
            # Account for packages with masks (like KEYWORDS masks)
            # that are usually ignored in visibility checks for
            # installed packages, in order to handle cases like
            # newly-masked installed packages.
            myopts = self._depgraph._frozen_config.myopts
            use_ebuild_visibility = myopts.get(
                '--use-ebuild-visibility', 'n') != 'n'
            avoid_update = "--update" not in myopts and \
                "remove" not in self._depgraph._dynamic_config.myparams
            usepkgonly = "--usepkgonly" in myopts
            if not avoid_update:
                if not use_ebuild_visibility and usepkgonly:
                    # Fall back to checking the visibility of the
                    # corresponding ebuild.
                    pkg_eb = self._depgraph._pkg(
                        pkg.cpv, "ebuild", pkg.root_config)
                except portage.exception.PackageNotFound:
                    if not self._depgraph._pkg_visibility_check(pkg_eb):
        in_graph = self._depgraph._dynamic_config._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            # conflicts).
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            # Note: highest_visible is not necessarily the real highest
            # visible, especially when --update is not enabled, so use
            # < operator instead of !=.
            if pkg < highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        an AmbiguousPackageName exception.
        """
        root_config = self._depgraph.roots[self._root]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not x.cp.startswith("virtual/"):
                    non_virtual_atoms.append(x)
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
        if len(expanded_atoms) > 1:
            # compatible with portage.cpv_expand()
            raise portage.exception.AmbiguousPackageName(
                [x.cp for x in expanded_atoms])
            # NOTE(review): an `elif expanded_atoms:` branch appears
            # to be elided; this assignment is unreachable as written.
            atom = expanded_atoms[0]
        null_atom = Atom(insert_category_into_atom(atom, "null"))
        cat, atom_pn = portage.catsplit(null_atom.cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the resolver to choose which virtual.
        atom = Atom(null_atom.replace('null/', 'virtual/', 1))

    def aux_get(self, cpv, wants):
        # Serve metadata from the cached Package objects; missing keys
        # yield empty strings, mirroring dbapi.aux_get() conventions.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]

    def match_pkgs(self, atom):
        # Like match(), but returns Package instances instead of cpvs.
        return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """
    Tell the user that the short package name `arg` matched multiple
    category/package names, listing the candidates (and, in verbose
    mode, search results for each).

    NOTE(review): an early `return` after the quiet branch and the
    terminator of the null_cp call appear to be elided -- confirm
    against upstream portage.
    """
    if "--quiet" in myopts:
        writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
        writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    # NOTE(review): the body of this loop (s.addCP(cp) etc.) appears
    # to be elided.
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
        writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
    writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def insert_category_into_atom(atom, category):
    """Insert `category` plus "/" in front of the package-name part of
    `atom` (at the first word character, so any version operators are
    preserved)."""
    alphanum = re.search(r'\w', atom)
    # NOTE(review): the `if alphanum:` guard and the else/return lines
    # appear to be elided -- alphanum may be None for atoms with no
    # word characters; confirm against upstream portage.
    ret = atom[:alphanum.start()] + "%s/" % category + \
        atom[alphanum.start():]
def _spinner_start(spinner, myopts):
    """
    Print the "These are the packages that would be ..." header for
    pretend-like modes, and configure or start the spinner otherwise.

    NOTE(review): an early `return` when spinner is None, the
    assignments of `action` ("fetched"/"built"/"merged") and the
    `else:` lines between the writemsg_stdout branches appear to be
    elided -- confirm against upstream portage.
    """
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
        elif "--buildpkgonly" in myopts:
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            if "--unordered-display" in myopts:
                portage.writemsg_stdout("\n" + \
                    darkgreen("These are the packages that " + \
                    "would be %s:" % action) + "\n\n")
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be %s, in reverse order:" % action) + "\n\n")
        portage.writemsg_stdout("\n" + \
            darkgreen("These are the packages that " + \
            "would be %s, in order:" % action) + "\n\n")
    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet
        # NOTE(review): an `if show_spinner:` guard appears to be
        # elided before this message.
        portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
    """Stop the spinner and print the trailing "... done!" marker,
    unless the spinner was disabled/quiet.

    NOTE(review): the `return` under this guard and the spinner stop
    call appear to be elided -- confirm against upstream portage."""
    if spinner is None or \
        spinner.update is spinner.update_quiet:
    # "\b\b" erases the spinner glyph before printing the completion.
    portage.writemsg_stdout("\b\b... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
    myaction, myfiles, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.

    NOTE(review): the try/finally wrapping appears to be elided here;
    as written the _spinner_stop() call is unreachable after `return`.
    Confirm against upstream portage.
    """
    _spinner_start(spinner, myopts)
    return _backtrack_depgraph(settings, trees, myopts, myparams,
        myaction, myfiles, spinner)
    _spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams,
    myaction, myfiles, spinner):
    """
    Iteratively build a depgraph, retrying with backtrack parameters
    from the previous attempt until it succeeds or the --backtrack
    limit is reached.

    NOTE(review): the `backtracked = 0` counter, the retry loop header,
    the terminator of the _frozen_depgraph_config() call and the
    writemsg_level( opener appear to be elided -- confirm against
    upstream portage.
    """
    # --backtrack limits how many times we retry with new masks.
    backtrack_max = myopts.get('--backtrack', 10)
    backtrack_parameters = {}
    needed_unstable_keywords = None
    allow_backtracking = backtrack_max > 0
    frozen_config = _frozen_depgraph_config(settings, trees,
    mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
        frozen_config=frozen_config,
        allow_backtracking=allow_backtracking,
        **backtrack_parameters)
    success, favorites = mydepgraph.select_files(myfiles)
    if mydepgraph.need_restart() and backtracked < backtrack_max:
        backtrack_parameters = mydepgraph.get_backtrack_parameters()
    elif backtracked and allow_backtracking:
        if "--debug" in myopts:
            "\n\nbacktracking aborted after %s tries\n\n" % \
            backtracked, noiselevel=-1, level=logging.DEBUG)
        # Backtracking failed, so disable it and do
        # a plain dep calculation + error message.
        allow_backtracking = False
        #Don't reset needed_unstable_keywords here, since we don't want to
        #send the user through a "one step at a time" unmasking session for
        #no good reason.
        backtrack_parameters.pop('runtime_pkg_mask', None)
    return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.

    NOTE(review): the try/finally wrapping and the terminator of the
    _resume_depgraph() call appear to be elided; as written the
    _spinner_stop() call is unreachable. Confirm against upstream.
    """
    _spinner_start(spinner, myopts)
    return _resume_depgraph(settings, trees, mtimedb, myopts,
    _spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
    TODO: Return reasons for dropped_tasks, for display/logging.
    @returns: (success, depgraph, dropped_tasks)

    NOTE(review): the retry loop header, the `try:` matching the
    UnsatisfiedResumeDep handler, the generator tail of the
    unsatisfied_parents dict, and several raise/terminator lines
    appear to be elided -- confirm against upstream portage.
    """
    skip_unsatisfied = True
    mergelist = mtimedb["resume"]["mergelist"]
    dropped_tasks = set()
    frozen_config = _frozen_depgraph_config(settings, trees,
    mydepgraph = depgraph(settings, trees,
        myopts, myparams, spinner, frozen_config=frozen_config)
    success = mydepgraph._loadResumeCommand(mtimedb["resume"],
        skip_masked=skip_masked)
    except depgraph.UnsatisfiedResumeDep as e:
        if not skip_unsatisfied:
        graph = mydepgraph._dynamic_config.digraph
        unsatisfied_parents = dict((dep.parent, dep.parent) \
        traversed_nodes = set()
        unsatisfied_stack = list(unsatisfied_parents)
        # Walk up the graph: any merge/nomerge parent whose dependency
        # would become unsatisfied when a node is dropped must also be
        # dropped.
        while unsatisfied_stack:
            pkg = unsatisfied_stack.pop()
            if pkg in traversed_nodes:
            traversed_nodes.add(pkg)

            # If this package was pulled in by a parent
            # package scheduled for merge, removing this
            # package may cause the parent package's
            # dependency to become unsatisfied.
            for parent_node in graph.parent_nodes(pkg):
                if not isinstance(parent_node, Package) \
                    or parent_node.operation not in ("merge", "nomerge"):
                graph.child_nodes(parent_node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
                if pkg in unsatisfied:
                    unsatisfied_parents[parent_node] = parent_node
                    unsatisfied_stack.append(parent_node)

        pruned_mergelist = []
        # NOTE(review): the `for x in mergelist:` header appears elided.
            if isinstance(x, list) and \
                tuple(x) not in unsatisfied_parents:
                pruned_mergelist.append(x)

        # If the mergelist doesn't shrink then this loop is infinite.
        if len(pruned_mergelist) == len(mergelist):
            # This happens if a package can't be dropped because
            # it's already installed, but it has unsatisfied PDEPEND.
        mergelist[:] = pruned_mergelist

        # Exclude installed packages that have been removed from the graph due
        # to failure to build/install runtime dependencies after the dependent
        # package has already been installed.
        dropped_tasks.update(pkg for pkg in \
            unsatisfied_parents if pkg.operation != "nomerge")
        mydepgraph.break_refs(unsatisfied_parents)
        del e, graph, traversed_nodes, \
            unsatisfied_parents, unsatisfied_stack
    return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys, _pkg_use_enabled=None):
    """
    Fetch metadata for cpv from db and compute its masking reasons.
    Returns (metadata, mreasons), where mreasons is a list of strings.

    NOTE(review): the try/except around aux_get() and several `else:`
    lines appear to be elided -- confirm against upstream portage.
    """
        metadata = dict(zip(db_keys,
            db.aux_get(cpv, db_keys)))
    if metadata is None:
        # aux_get failed; treat the entry as corrupt.
        mreasons = ["corruption"]
        eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        mreasons = ['EAPI %s' % eapi]
        pkg = Package(type_name=pkg_type, root_config=root_config,
            cpv=cpv, built=built, installed=installed, metadata=metadata)
    if _pkg_use_enabled is not None:
        modified_use = _pkg_use_enabled(pkg)
    mreasons = get_masking_status(pkg, pkgsettings, root_config, use=modified_use)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package with its mask reasons, the relevant
    package.mask comment (once per distinct comment) and the location
    of any missing license files (once per license).
    Returns True when any package was masked due to an unsupported EAPI.

    NOTE(review): `shown_cpvs = set()`, `continue` lines, the `try:`
    matching the InvalidDependString handler and the terminator of one
    writemsg_stdout() call appear to be elided -- confirm against
    upstream portage.
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
            comment, filename = None, None
        if "package.mask" in mreasons:
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
            missing_licenses = \
                pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        writemsg_stdout("- "+cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
        if comment and comment not in shown_comments:
            writemsg_stdout(filename + ":\n" + comment + "\n",
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
                msg = ("A copy of the '%s' license" + \
                    " is located at '%s'.\n\n") % (l, l_path)
            writemsg_stdout(msg, noiselevel=-1)
            shown_licenses.add(l)
    return have_eapi_mask
def show_mask_docs():
    """Point the user at the documentation about masked packages."""
    lines = (
        "For more information, see the MASKED PACKAGES section in the emerge\n",
        "man page or refer to the Gentoo Handbook.\n",
    )
    for line in lines:
        writemsg_stdout(line, noiselevel=-1)
def show_blocker_docs_link():
    """Print a pointer to the handbook section about blocked packages."""
    messages = (
        "\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
        "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
        "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
    )
    for message in messages:
        writemsg(message, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, use=None):
    """Return the human-readable message for each mask reason of pkg."""
    reasons = _get_masking_status(pkg, pkgsettings, root_config, use=use)
    return [reason.message for reason in reasons]
def _get_masking_status(pkg, pkgsettings, root_config, use=None):
    """
    Compute the list of _MaskReason objects for pkg: the standard
    masking status plus CHOST, invalid-metadata and undefined-SLOT
    reasons.

    NOTE(review): this block appears truncated -- several lines
    (the installed-package branch, the inner `for msg in msgs:` loop
    with its mreasons.append( openers, and the final return) are
    elided from this view; confirm against upstream portage.
    """
    mreasons = _getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)

    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
                pkg.metadata["CHOST"]))
    for msg_type, msgs in pkg.invalid.items():
            _MaskReason("invalid", "invalid: %s" % (msg,)))
    if not pkg.metadata["SLOT"]:
        _MaskReason("invalid", "SLOT: undefined"))