1 # Copyright 1999-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
11 from itertools import chain
14 from portage import os
15 from portage import _unicode_decode
16 from portage.const import PORTAGE_PACKAGE_ATOM
17 from portage.dbapi import dbapi
18 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
19 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
20 from portage.exception import InvalidAtom
21 from portage.output import colorize, create_color_func, \
23 bad = create_color_func("BAD")
24 from portage.package.ebuild.getmaskingstatus import \
25 _getmaskingstatus, _MaskReason
26 from portage._sets import SETPREFIX
27 from portage._sets.base import InternalPackageSet
28 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
29 from portage.util import writemsg_level
30 from portage.util.digraph import digraph
32 from _emerge.AtomArg import AtomArg
33 from _emerge.Blocker import Blocker
34 from _emerge.BlockerCache import BlockerCache
35 from _emerge.BlockerDepPriority import BlockerDepPriority
36 from _emerge.countdown import countdown
37 from _emerge.create_world_atom import create_world_atom
38 from _emerge.Dependency import Dependency
39 from _emerge.DependencyArg import DependencyArg
40 from _emerge.DepPriority import DepPriority
41 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
42 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
43 from _emerge.FakeVartree import FakeVartree
44 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
45 from _emerge.is_valid_package_atom import is_valid_package_atom
46 from _emerge.Package import Package
47 from _emerge.PackageArg import PackageArg
48 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
49 from _emerge.RootConfig import RootConfig
50 from _emerge.search import search
51 from _emerge.SetArg import SetArg
52 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
53 from _emerge.UnmergeDepPriority import UnmergeDepPriority
55 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
56 from _emerge.resolver.slot_collision import slot_conflict_handler
57 from _emerge.resolver.circular_dependency import circular_dependency_handler
58 from _emerge.resolver.output import display, filter_iuse_defaults
60 if sys.hexversion >= 0x3000000:
# Immutable bundle of the data a Scheduler needs to build its task graph:
# the tree objects, the shared Package cache, the digraph, and the ordered
# merge list.
# NOTE(review): the embedded original line numbers jump (65 -> 67 -> 69), so
# the assignments for ``trees`` and ``graph`` appear to be elided from this
# dump — confirm against the upstream file before relying on this text.
64 class _scheduler_graph_config(object):
65 	def __init__(self, trees, pkg_cache, graph, mergelist):
67 		self.pkg_cache = pkg_cache
69 		self.mergelist = mergelist
# State that is computed once per emerge invocation and shared, unchanged,
# across backtracking depgraph instances: settings, per-root trees wrapped
# in FakeVartree, cloned pkgsettings, and the --exclude atom set.
# NOTE(review): numbering gaps (75->78, 83->86, 90->92, 96->98, 106->108,
# 111->113) indicate elided lines in this dump — among them, apparently, the
# initialisation of self.trees/self.roots/self.pkgsettings, the per-root
# loop header, and the ``try:`` that pairs with the InvalidAtom handler
# below. Confirm against upstream before editing.
71 class _frozen_depgraph_config(object):
73 	def __init__(self, settings, trees, myopts, spinner):
74 		self.settings = settings
75 		self.target_root = settings["ROOT"]
78 		if settings.get("PORTAGE_DEBUG", "") == "1":
80 		self.spinner = spinner
81 		self._running_root = trees["/"]["root_config"]
# Options that make a needed restart unnecessary (nothing is merged).
82 		self._opts_no_restart = frozenset(["--buildpkgonly",
83 			"--fetchonly", "--fetch-all-uri", "--pretend"])
86 		self._trees_orig = trees
88 		# All Package instances
90 		self._highest_license_masked = {}
92 			self.trees[myroot] = {}
93 			# Create a RootConfig instance that references
94 			# the FakeVartree instead of the real one.
95 			self.roots[myroot] = RootConfig(
96 				trees[myroot]["vartree"].settings,
98 				trees[myroot]["root_config"].setconfig)
99 			for tree in ("porttree", "bintree"):
100 				self.trees[myroot][tree] = trees[myroot][tree]
# The vartree is replaced with a FakeVartree so the resolver can model
# future vdb state without touching the real installed-package database.
101 			self.trees[myroot]["vartree"] = \
102 				FakeVartree(trees[myroot]["root_config"],
103 					pkg_cache=self._pkg_cache,
104 					pkg_root_config=self.roots[myroot])
105 			self.pkgsettings[myroot] = portage.config(
106 				clone=self.trees[myroot]["vartree"].settings)
108 		self._required_set_names = set(["world"])
# Wildcards are allowed in --exclude; a bare name is retried as */name.
110 		self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
111 		for x in ' '.join(myopts.get("--exclude", [])).split():
113 				x = Atom(x, allow_wildcard=True)
114 			except portage.exception.InvalidAtom:
115 				x = Atom("*/" + x, allow_wildcard=True)
116 			self.excluded_pkgs.add(x)
# Per-root container tracking the package sets pulled into the graph, plus
# the union of their atoms and a map from atom back to the originating args.
# NOTE(review): the ``def __init__(self):`` line and the ``self.sets = {}``
# initialisation appear to be elided from this dump (numbering jumps
# 118 -> 120 -> 122) — confirm against upstream.
118 class _depgraph_sets(object):
120 		# contains all sets added to the graph
122 		# contains non-set atoms given as arguments
123 		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
124 		# contains all atoms from all sets added to the graph, including
125 		# atoms given as arguments
126 		self.atoms = InternalPackageSet(allow_repo=True)
127 		self.atom_arg_map = {}
# Mutable per-resolution state of a depgraph: the digraph itself, blocker
# bookkeeping digraphs, slot-conflict tracking, backtracking parameters, and
# the per-root db views (graph_trees / filtered_trees) used by dep_check().
# A fresh instance is created for every backtracking attempt, while
# _frozen_depgraph_config is shared.
# NOTE(review): this dump has many elided lines (numbering jumps such as
# 138->140, 152->154, 197->199, 215->217, 217->220, 227->230, 252->255,
# 261->263) — apparently including the initialisation of self.sets,
# self.mydbapi, the construction of ``graph_tree``/``filtered_tree``
# (portage.proxy-style trees) and of the ``dbs`` list. Confirm against
# upstream before editing.
129 class _dynamic_depgraph_config(object):
131 	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
132 		self.myparams = myparams.copy()
133 		self._vdb_loaded = False
134 		self._allow_backtracking = allow_backtracking
135 		# Maps slot atom to package for each Package added to the graph.
136 		self._slot_pkg_map = {}
137 		# Maps nodes to the reasons they were selected for reinstallation.
138 		self._reinstall_nodes = {}
140 		# Contains a filtered view of preferred packages that are selected
141 		# from available repositories.
142 		self._filtered_trees = {}
143 		# Contains installed packages and new packages that have been added
145 		self._graph_trees = {}
146 		# Caches visible packages returned from _select_package, for use in
147 		# depgraph._iter_atoms_for_pkg() SLOT logic.
148 		self._visible_pkgs = {}
149 		#contains the args created by select_files
150 		self._initial_arg_list = []
151 		self.digraph = portage.digraph()
152 		# manages sets added to the graph
154 		# contains all nodes pulled in by self.sets
155 		self._set_nodes = set()
156 		# Contains only Blocker -> Uninstall edges
157 		self._blocker_uninstalls = digraph()
158 		# Contains only Package -> Blocker edges
159 		self._blocker_parents = digraph()
160 		# Contains only irrelevant Package -> Blocker edges
161 		self._irrelevant_blockers = digraph()
162 		# Contains only unsolvable Package -> Blocker edges
163 		self._unsolvable_blockers = digraph()
164 		# Contains all Blocker -> Blocked Package edges
165 		self._blocked_pkgs = digraph()
166 		# Contains world packages that have been protected from
167 		# uninstallation but may not have been added to the graph
168 		# if the graph is not complete yet.
169 		self._blocked_world_pkgs = {}
170 		# Contains packages whose dependencies have been traversed.
171 		# This is used to check if we have accounted for blockers
172 		# relevant to a package.
173 		self._traversed_pkg_deps = set()
174 		self._slot_collision_info = {}
175 		# Slot collision nodes are not allowed to block other packages since
176 		# blocker validation is only able to account for one package per slot.
177 		self._slot_collision_nodes = set()
178 		self._parent_atoms = {}
179 		self._slot_conflict_parent_atoms = set()
180 		self._slot_conflict_handler = None
181 		self._circular_dependency_handler = None
182 		self._serialized_tasks_cache = None
183 		self._scheduler_graph = None
184 		self._displayed_list = None
185 		self._pprovided_args = []
186 		self._missing_args = []
187 		self._masked_installed = set()
188 		self._masked_license_updates = set()
189 		self._unsatisfied_deps_for_display = []
190 		self._unsatisfied_blockers_for_display = None
191 		self._circular_deps_for_display = None
193 		self._dep_disjunctive_stack = []
194 		self._unsatisfied_deps = []
195 		self._initially_unsatisfied_deps = []
196 		self._ignored_deps = []
197 		self._highest_pkg_cache = {}
# Backtracking inputs carried over from previous resolution attempts.
199 		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
200 		self._needed_license_changes = backtrack_parameters.needed_license_changes
201 		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
202 		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
203 		self._need_restart = False
204 		self._backtrack_infos = {}
# NOTE(review): ``== True`` here means only a literal boolean enables
# autounmask; a string value like 'y' would not. Can't tell from this
# chunk whether that is intended — confirm against the option parser.
206 		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
207 		self._success_without_autounmask = False
209 		for myroot in depgraph._frozen_config.trees:
210 			self.sets[myroot] = _depgraph_sets()
211 			self._slot_pkg_map[myroot] = {}
212 			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
213 			# This dbapi instance will model the state that the vdb will
214 			# have after new packages have been installed.
215 			fakedb = PackageVirtualDbapi(vardb.settings)
217 			self.mydbapi[myroot] = fakedb
220 			graph_tree.dbapi = fakedb
221 			self._graph_trees[myroot] = {}
222 			self._filtered_trees[myroot] = {}
223 			# Substitute the graph tree for the vartree in dep_check() since we
224 			# want atom selections to be consistent with package selections
225 			# that have already been made.
226 			self._graph_trees[myroot]["porttree"] = graph_tree
227 			self._graph_trees[myroot]["vartree"] = graph_tree
230 			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
231 			self._filtered_trees[myroot]["porttree"] = filtered_tree
232 			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
234 			# Passing in graph_tree as the vartree here could lead to better
235 			# atom selections in some cases by causing atoms for packages that
236 			# have been added to the graph to be preferred over other choices.
237 			# However, it can trigger atom selections that result in
238 			# unresolvable direct circular dependencies. For example, this
239 			# happens with gwydion-dylan which depends on either itself or
240 			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
241 			# gwydion-dylan-bin needs to be selected in order to avoid a
242 			# an unresolvable direct circular dependency.
244 			# To solve the problem described above, pass in "graph_db" so that
245 			# packages that have been added to the graph are distinguishable
246 			# from other available packages and installed packages. Also, pass
247 			# the parent package into self._select_atoms() calls so that
248 			# unresolvable direct circular dependencies can be detected and
249 			# avoided when possible.
250 			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
251 			self._filtered_trees[myroot]["vartree"] = \
252 				depgraph._frozen_config.trees[myroot]["vartree"]
255 			# (db, pkg_type, built, installed, db_keys)
256 			if "remove" in self.myparams:
257 				# For removal operations, use _dep_check_composite_db
258 				# for availability and visibility checks. This provides
259 				# consistency with install operations, so we don't
260 				# get install/uninstall cycles like in bug #332719.
261 				self._graph_trees[myroot]["porttree"] = filtered_tree
263 				if "--usepkgonly" not in depgraph._frozen_config.myopts:
264 					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
265 					db_keys = list(portdb._aux_cache_keys)
266 					dbs.append((portdb, "ebuild", False, False, db_keys))
268 				if "--usepkg" in depgraph._frozen_config.myopts:
269 					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
270 					db_keys = list(bindb._aux_cache_keys)
271 					dbs.append((bindb, "binary", True, False, db_keys))
# The installed-package db is always consulted last in the dbs list.
273 				vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
274 				db_keys = list(depgraph._frozen_config._trees_orig[myroot
275 					]["vartree"].dbapi._aux_cache_keys)
276 				dbs.append((vardb, "installed", True, True, db_keys))
277 			self._filtered_trees[myroot]["dbs"] = dbs
# The dependency resolver. Splits its state into a shared frozen config and
# a per-attempt dynamic config so backtracking can retry cheaply.
# NOTE(review): ``backtrack_parameters=BacktrackParameter()`` is a mutable
# default argument — the same BacktrackParameter instance is shared by every
# call that omits the argument. Harmless only if it is never mutated;
# confirm against BacktrackParameter's definition before relying on it.
# NOTE(review): numbering gaps (286->287->288, 288->290, 292->294) indicate
# elided lines in this dump (e.g. the remaining arguments to
# _frozen_depgraph_config) — confirm against upstream.
279 class depgraph(object):
281 	pkg_tree_map = RootConfig.pkg_tree_map
# Dependency metadata keys inspected when traversing a package's deps.
283 	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
285 	def __init__(self, settings, trees, myopts, myparams, spinner,
286 		frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
287 		if frozen_config is None:
288 			frozen_config = _frozen_depgraph_config(settings, trees,
290 		self._frozen_config = frozen_config
291 		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
292 			allow_backtracking, backtrack_parameters)
# Strategy hooks: both default to the "highest available" policy.
294 		self._select_atoms = self._select_atoms_highest_available
295 		self._select_package = self._select_pkg_highest_available
# NOTE(review): the ``def`` line and opening quotes of this method's
# docstring are elided from this dump (numbering jumps into 299 mid-
# docstring) — upstream this is the ``_load_vdb`` method. Other gaps
# (302->305, 305->308, 311->313, 316->319, 327->330, 333->335, 337->339)
# hide at least the early ``return``, the fake_vartree.sync() call, the
# loop over installed packages, and the cache-clearing conditional.
299 		Load installed package metadata if appropriate. This used to be called
300 		from the constructor, but that wasn't very nice since this procedure
301 		is slow and it generates spinner output. So, now it's called on-demand
302 		by various methods when necessary.
# Idempotent: bail out once the vdb has been loaded for this dynamic config.
305 		if self._dynamic_config._vdb_loaded:
308 		for myroot in self._frozen_config.trees:
# With --nodeps there is no reason to preload installed packages.
310 			preload_installed_pkgs = \
311 				"--nodeps" not in self._frozen_config.myopts
313 			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
314 			if not fake_vartree.dbapi:
315 				# This needs to be called for the first depgraph, but not for
316 				# backtracking depgraphs that share the same frozen_config.
319 				# FakeVartree.sync() populates virtuals, and we want
320 				# self.pkgsettings to have them populated too.
321 				self._frozen_config.pkgsettings[myroot] = \
322 					portage.config(clone=fake_vartree.settings)
324 			if preload_installed_pkgs:
325 				vardb = fake_vartree.dbapi
326 				fakedb = self._dynamic_config._graph_trees[
327 					myroot]["vartree"].dbapi
330 					self._spinner_update()
331 					# This triggers metadata updates via FakeVartree.
332 					vardb.aux_get(pkg.cpv, [])
333 					fakedb.cpv_inject(pkg)
335 				# Now that the vardb state is cached in our FakeVartree,
336 				# we won't be needing the real vartree cache for awhile.
337 				# To make some room on the heap, clear the vardbapi
339 					self._frozen_config._trees_orig[myroot
340 						]["vartree"].dbapi._clear_cache()
343 		self._dynamic_config._vdb_loaded = True
def _spinner_update(self):
	"""Advance the progress spinner by one tick; no-op when none is set."""
	spinner = self._frozen_config.spinner
	if spinner:
		spinner.update()
# Report updates that were skipped (masked at runtime) during resolution,
# collapsing to the highest missed update per (root, slot) and dispatching
# to the slot-conflict / missing-dependency display helpers.
# NOTE(review): numbering gaps (352->355, 356->358, 359->361, 362->364,
# 366->369, 369->372, 372->375, 375->378, 381->383, 384->386) hide lines
# from this dump — including the early ``return``s, the
# ``missed_updates = {}`` initialisation, the installed-package filter, and
# the version-comparison guard. Confirm against upstream.
349 	def _show_missed_update(self):
# Stay silent under --quiet unless --debug is also given.
351 		if '--quiet' in self._frozen_config.myopts and \
352 			'--debug' not in self._frozen_config.myopts:
355 		# In order to minimize noise, show only the highest
356 		# missed update from each SLOT.
358 		for pkg, mask_reasons in \
359 			self._dynamic_config._runtime_pkg_mask.items():
361 			# Exclude installed here since we only
362 			# want to show available updates.
364 			k = (pkg.root, pkg.slot_atom)
365 			if k in missed_updates:
366 				other_pkg, mask_type, parent_atoms = missed_updates[k]
369 			for mask_type, parent_atoms in mask_reasons.items():
372 				missed_updates[k] = (pkg, mask_type, parent_atoms)
375 		if not missed_updates:
# Group the survivors by mask type for the two specialised reporters.
378 		missed_update_types = {}
379 		for pkg, mask_type, parent_atoms in missed_updates.values():
380 			missed_update_types.setdefault(mask_type,
381 				[]).append((pkg, parent_atoms))
383 		self._show_missed_update_slot_conflicts(
384 			missed_update_types.get("slot conflict"))
386 		self._show_missed_update_unsatisfied_dep(
387 			missed_update_types.get("missing dependency"))
# Print, for each update skipped due to an unsatisfiable dependency, the
# unsatisfied-dep details; updates masked only because of backtracking are
# listed separately in abbreviated form to avoid terminal flooding.
# NOTE(review): numbering gaps (391->394, 396->399, 404->407, 408->410,
# 410->412, 417->420, 426->428) hide lines from this dump — including the
# early ``return``, the ``try:`` that pairs with the ``except
# self._backtrack_mask`` handler, and the backtrack_masked guard. Confirm
# against upstream.
389 	def _show_missed_update_unsatisfied_dep(self, missed_updates):
391 		if not missed_updates:
394 		backtrack_masked = []
396 		for pkg, parent_atoms in missed_updates:
# Probe each parent atom; _backtrack_mask signals a backtrack-only mask.
399 				for parent, root, atom in parent_atoms:
400 					self._show_unsatisfied_dep(root, atom, myparent=parent,
401 						check_backtrack=True)
402 			except self._backtrack_mask:
403 				# This is displayed below in abbreviated form.
404 				backtrack_masked.append((pkg, parent_atoms))
407 			writemsg("\n!!! The following update has been skipped " + \
408 				"due to unsatisfied dependencies:\n\n", noiselevel=-1)
410 			writemsg(str(pkg.slot_atom), noiselevel=-1)
412 				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
413 			writemsg("\n", noiselevel=-1)
415 			for parent, root, atom in parent_atoms:
416 				self._show_unsatisfied_dep(root, atom, myparent=parent)
417 				writemsg("\n", noiselevel=-1)
420 			# These are shown in abbreviated form, in order to avoid terminal
421 			# flooding from mask messages as reported in bug #285832.
422 			writemsg("\n!!! The following update(s) have been skipped " + \
423 				"due to unsatisfied dependencies\n" + \
424 				"!!! triggered by backtracking:\n\n", noiselevel=-1)
425 			for pkg, parent_atoms in backtrack_masked:
426 				writemsg(str(pkg.slot_atom), noiselevel=-1)
428 					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
429 				writemsg("\n", noiselevel=-1)
# Print updates skipped because of slot conflicts, listing each skipped
# slot atom together with the parent/atom pairs that conflict with it.
# NOTE(review): numbering gaps (433->437, 438->441, 442->444, 444->447,
# 447->451, 451->453, 457->459, 461->465) hide lines from this dump —
# including the early ``return``, the ``msg = []`` initialisation, and the
# else-branch header before the SetArg display comment. Confirm against
# upstream.
431 	def _show_missed_update_slot_conflicts(self, missed_updates):
433 		if not missed_updates:
437 		msg.append("\n!!! One or more updates have been skipped due to " + \
438 			"a dependency conflict:\n\n")
441 		for pkg, parent_atoms in missed_updates:
442 			msg.append(str(pkg.slot_atom))
444 				msg.append(" for %s" % (pkg.root,))
447 			for parent, atom in parent_atoms:
451 				msg.append(" conflicts with\n")
453 				if isinstance(parent,
454 					(PackageArg, AtomArg)):
455 					# For PackageArg and AtomArg types, it's
456 					# redundant to display the atom attribute.
457 					msg.append(str(parent))
459 					# Display the specific atom from SetArg or
461 					msg.append("%s required by %s" % (atom, parent))
465 		writemsg("".join(msg), noiselevel=-1)
# NOTE(review): numbering gaps (471->475, 475->478, 478->480, 481->483,
# 484->486, 486->488, 488->491, 491->495, 510->512, 514->517) hide lines
# from this dump — including the docstring's closing lines, early
# ``return``s, and the ``msg = []`` initialisations before each paragraph.
# Confirm against upstream.
467 	def _show_slot_collision_notice(self):
468 		"""Show an informational message advising the user to mask one of
469 		the packages. In some cases it may be possible to resolve this
470 		automatically, but support for backtracking (removal nodes that have
471 		already been selected) will be required in order to handle all possible
475 		if not self._dynamic_config._slot_collision_info:
478 		self._show_merge_list()
# Delegate detail generation to the dedicated slot_conflict_handler.
480 		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
481 		handler = self._dynamic_config._slot_conflict_handler
483 		conflict = handler.get_conflict()
484 		writemsg(conflict, noiselevel=-1)
486 		explanation = handler.get_explanation()
488 			writemsg(explanation, noiselevel=-1)
491 		if "--quiet" in self._frozen_config.myopts:
495 		msg.append("It may be possible to solve this problem ")
496 		msg.append("by using package.mask to prevent one of ")
497 		msg.append("those packages from being selected. ")
498 		msg.append("However, it is also possible that conflicting ")
499 		msg.append("dependencies exist such that they are impossible to ")
500 		msg.append("satisfy simultaneously. If such a conflict exists in ")
501 		msg.append("the dependencies of two different packages, then those ")
502 		msg.append("packages can not be installed simultaneously.")
# Suggest a larger --backtrack value only when backtracking is off and the
# configured value is below the suggested 30.
503 		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
504 		if not self._dynamic_config._allow_backtracking and \
505 			(backtrack_opt is None or \
506 			(backtrack_opt > 0 and backtrack_opt < 30)):
507 			msg.append(" You may want to try a larger value of the ")
508 			msg.append("--backtrack option, such as --backtrack=30, ")
509 			msg.append("in order to see if that will solve this conflict ")
510 			msg.append("automatically.")
512 		for line in textwrap.wrap(''.join(msg), 70):
513 			writemsg(line + '\n', noiselevel=-1)
514 		writemsg('\n', noiselevel=-1)
517 		msg.append("For more information, see MASKED PACKAGES ")
518 		msg.append("section in the emerge man page or refer ")
519 		msg.append("to the Gentoo Handbook.")
520 		for line in textwrap.wrap(''.join(msg), 70):
521 			writemsg(line + '\n', noiselevel=-1)
522 		writemsg('\n', noiselevel=-1)
# NOTE(review): numbering gaps (524->526, 528->530, 531->533, 535->538,
# 538->540, 542->544, 546->548, 554->556) hide lines from this dump —
# including the docstring delimiters, ``continue`` statements, and the
# ``parent_atoms = set()`` initialisation before line 544. Confirm against
# upstream.
524 	def _process_slot_conflicts(self):
526 		Process slot conflict data to identify specific atoms which
527 		lead to conflict. These atoms only match a subset of the
528 		packages that have been pulled into a given slot.
530 		for (slot_atom, root), slot_nodes \
531 			in self._dynamic_config._slot_collision_info.items():
# First pass: union of every parent atom over all conflicting packages.
533 			all_parent_atoms = set()
534 			for pkg in slot_nodes:
535 				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
538 				all_parent_atoms.update(parent_atoms)
# Second pass: record, per package, which parent atoms it satisfies; the
# atoms a package does NOT satisfy identify the conflict.
540 			for pkg in slot_nodes:
541 				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
542 				if parent_atoms is None:
544 					self._dynamic_config._parent_atoms[pkg] = parent_atoms
545 				for parent_atom in all_parent_atoms:
546 					if parent_atom in parent_atoms:
548 					# Use package set for matching since it will match via
549 					# PROVIDE when necessary, while match_from_list does not.
550 					parent, atom = parent_atom
551 					atom_set = InternalPackageSet(
552 						initial_atoms=(atom,), allow_repo=True)
553 					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
554 						parent_atoms.add(parent_atom)
556 						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
# NOTE(review): numbering gaps (567->570, and the jump past 572) hide lines
# from this dump — upstream this method ends with ``if flags: return
# flags`` branches and a final ``return None``; as shown it has no return
# statements at all. Confirm against upstream.
558 	def _reinstall_for_flags(self, forced_flags,
559 		orig_use, orig_iuse, cur_use, cur_iuse):
560 		"""Return a set of flags that trigger reinstallation, or None if there
561 		are no such flags."""
# --newuse / --binpkg-respect-use: any IUSE change (minus forced flags) or
# any change in effective enabled-USE counts.
562 		if "--newuse" in self._frozen_config.myopts or \
563 			"--binpkg-respect-use" in self._frozen_config.myopts:
564 			flags = set(orig_iuse.symmetric_difference(
565 				cur_iuse).difference(forced_flags))
566 			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
567 				cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in enabled flags within IUSE count.
570 		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
571 			flags = orig_iuse.intersection(orig_use).symmetric_difference(
572 				cur_iuse.intersection(cur_use))
# Drain the dependency work stacks, adding packages/deps to the graph until
# both stacks are empty; returns falsy on failure (per the visible
# call-result checks), truthy on success.
# NOTE(review): numbering gaps (581->583, 586->589, 589->591, 592 and the
# jump past it) hide lines from this dump — including the ``if dep_stack:``
# guard, the ``return 0`` failure paths, and the final ``return 1``.
# Confirm against upstream.
577 	def _create_graph(self, allow_unsatisfied=False):
578 		dep_stack = self._dynamic_config._dep_stack
579 		dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
580 		while dep_stack or dep_disjunctive_stack:
581 			self._spinner_update()
583 				dep = dep_stack.pop()
# A Package on the stack means "expand its deps"; anything else is a
# Dependency to resolve directly.
584 				if isinstance(dep, Package):
585 					if not self._add_pkg_deps(dep,
586 						allow_unsatisfied=allow_unsatisfied):
589 					if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
591 			if dep_disjunctive_stack:
592 				if not self._pop_disjunction(allow_unsatisfied):
# NOTE(review): numbering gaps (596->598, 606->608, 609->612, 612->614,
# 615->619, 620->623, 624->626, 626->629, 633->635, 642->644, 646 and
# beyond) hide lines from this dump — including the docstring delimiters,
# the ``yield`` statements, the ``arg_stack`` initialisation, and the
# ``if add_to_digraph:`` guard before the digraph.add call. Confirm against
# upstream.
596 	def _expand_set_args(self, input_args, add_to_digraph=False):
598 		Iterate over a list of DependencyArg instances and yield all
599 		instances given in the input together with additional SetArg
600 		instances that are generated from nested sets.
601 		@param input_args: An iterable of DependencyArg instances
602 		@type input_args: Iterable
603 		@param add_to_digraph: If True then add SetArg instances
604 			to the digraph, in order to record parent -> child
605 			relationships from nested sets
606 		@type add_to_digraph: Boolean
608 		@returns: All args given in the input together with additional
609 			SetArg instances that are generated from nested sets
# Guards against revisiting a set reachable through multiple parents.
612 		traversed_set_args = set()
614 		for arg in input_args:
615 			if not isinstance(arg, SetArg):
619 			root_config = arg.root_config
620 			depgraph_sets = self._dynamic_config.sets[root_config.root]
# Depth-first expansion of nested sets via an explicit stack.
623 				arg = arg_stack.pop()
624 				if arg in traversed_set_args:
626 				traversed_set_args.add(arg)
629 				# Traverse nested sets and add them to the stack
630 				# if they're not already in the graph. Also, graph
631 				# edges between parent and nested sets.
632 				for token in arg.pset.getNonAtoms():
633 					if not token.startswith(SETPREFIX):
635 					s = token[len(SETPREFIX):]
# Prefer a set already attached to this depgraph over the root config's.
636 					nested_set = depgraph_sets.sets.get(s)
637 					if nested_set is None:
638 						nested_set = root_config.sets.get(s)
639 					if nested_set is not None:
640 						nested_arg = SetArg(arg=token, pset=nested_set,
641 							root_config=root_config)
642 						arg_stack.append(nested_arg)
644 							self._dynamic_config.digraph.add(nested_arg, arg,
645 								priority=BlockerDepPriority.instance)
646 							depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# Resolve one Dependency: turn blocker atoms into Blocker nodes, select a
# package satisfying the atom, handle unsatisfied deps (optionally recording
# a backtracking opportunity), and hand the chosen package to _add_pkg().
# NOTE(review): this dump has many elided lines (numbering gaps such as
# 654->656, 656->658, 661->663, 668->671, 673->675, 676->678, 680->684,
# 686->688, 689->691, 692->694, 699->701, 701->703, 710->712, 714->718,
# 722->724, 725->728, 736->738, 738->740, 741->743, 746->749, 751->753,
# 753->756, 756 and beyond) — including ``return`` statements, the blocker
# branch's ``if dep.blocker:`` header, ``try:`` lines pairing with the
# visible ``except`` clauses, and ``writemsg``/``msg = []`` openers.
# Confirm against upstream before editing anything here.
648 	def _add_dep(self, dep, allow_unsatisfied=False):
649 		debug = "--debug" in self._frozen_config.myopts
650 		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
651 		nodeps = "--nodeps" in self._frozen_config.myopts
652 		empty = "empty" in self._dynamic_config.myparams
653 		deep = self._dynamic_config.myparams.get("deep", 0)
# deep is True means unbounded depth; otherwise compare numerically.
654 		recurse = empty or deep is True or dep.depth <= deep
656 		if not buildpkgonly and \
658 			dep.parent not in self._dynamic_config._slot_collision_nodes:
659 			if dep.parent.onlydeps:
660 				# It's safe to ignore blockers if the
661 				# parent is an --onlydeps node.
663 			# The blocker applies to the root where
664 			# the parent is or will be installed.
665 			blocker = Blocker(atom=dep.atom,
666 				eapi=dep.parent.metadata["EAPI"],
667 				priority=dep.priority, root=dep.parent.root)
668 			self._dynamic_config._blocker_parents.add(blocker, dep.parent)
671 		if dep.child is None:
672 			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
673 				onlydeps=dep.onlydeps)
675 			# The caller has selected a specific package
676 			# via self._minimize_packages().
678 			existing_node = self._dynamic_config._slot_pkg_map[
679 				dep.root].get(dep_pkg.slot_atom)
680 			if existing_node is not dep_pkg:
684 			if dep.priority.optional:
685 				# This could be an unnecessary build-time dep
686 				# pulled in by --with-bdeps=y.
688 			if allow_unsatisfied:
689 				self._dynamic_config._unsatisfied_deps.append(dep)
691 			self._dynamic_config._unsatisfied_deps_for_display.append(
692 				((dep.root, dep.atom), {"myparent":dep.parent}))
694 			# The parent node should not already be in
695 			# runtime_pkg_mask, since that would trigger an
696 			# infinite backtracking loop.
697 			if self._dynamic_config._allow_backtracking:
698 				if dep.parent in self._dynamic_config._runtime_pkg_mask:
699 					if "--debug" in self._frozen_config.myopts:
701 							"!!! backtracking loop detected: %s %s\n" % \
703 							self._dynamic_config._runtime_pkg_mask[
704 							dep.parent]), noiselevel=-1)
700 				elif not self.need_restart():
706 					# Do not backtrack if only USE have to be changed in
707 					# order to satisfy the dependency.
708 					dep_pkg, existing_node = \
709 						self._select_package(dep.root, dep.atom.without_use,
710 						onlydeps=dep.onlydeps)
# Record the failed dep so the backtracker can mask dep.parent and retry.
712 						self._dynamic_config._backtrack_infos["missing dependency"] = dep
713 						self._dynamic_config._need_restart = True
714 						if "--debug" in self._frozen_config.myopts:
718 							msg.append("backtracking due to unsatisfied dep:")
719 							msg.append(" parent: %s" % dep.parent)
720 							msg.append(" priority: %s" % dep.priority)
721 							msg.append(" root: %s" % dep.root)
722 							msg.append(" atom: %s" % dep.atom)
724 							writemsg_level("".join("%s\n" % l for l in msg),
725 								noiselevel=-1, level=logging.DEBUG)
728 		# In some cases, dep_check will return deps that shouldn't
729 		# be processed any further, so they are identified and
730 		# discarded here. Try to discard as few as possible since
731 		# discarded dependencies reduce the amount of information
732 		# available for optimization of merge order.
733 		if dep.priority.satisfied and \
734 			dep.priority.satisfied.visible and \
735 			not dep_pkg.installed and \
736 			not (existing_node or recurse):
738 			if dep.root == self._frozen_config.target_root:
740 					myarg = next(self._iter_atoms_for_pkg(dep_pkg))
741 				except StopIteration:
743 				except portage.exception.InvalidDependString:
744 					if not dep_pkg.installed:
745 						# This shouldn't happen since the package
746 						# should have been masked.
749 			# Existing child selection may not be valid unless
750 			# it's added to the graph immediately, since "complete"
751 			# mode may select a different child later.
753 				self._dynamic_config._ignored_deps.append(dep)
756 		if not self._add_pkg(dep_pkg, dep):
760 def _add_pkg(self, pkg, dep):
767 myparent = dep.parent
768 priority = dep.priority
771 priority = DepPriority()
773 Fills the digraph with nodes comprised of packages to merge.
774 mybigkey is the package spec of the package to merge.
775 myparent is the package depending on mybigkey ( or None )
776 addme = Should we add this package to the digraph or are we just looking at it's deps?
777 Think --onlydeps, we need to ignore packages in that case.
780 #IUSE-aware emerge -> USE DEP aware depgraph
781 #"no downgrade" emerge
783 # Ensure that the dependencies of the same package
784 # are never processed more than once.
785 previously_added = pkg in self._dynamic_config.digraph
787 # select the correct /var database that we'll be checking against
788 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
789 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
794 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
795 except portage.exception.InvalidDependString as e:
796 if not pkg.installed:
797 # should have been masked before it was selected
802 if not pkg.installed and \
803 "empty" not in self._dynamic_config.myparams and \
804 vardbapi.match(pkg.slot_atom):
805 # Increase the priority of dependencies on packages that
806 # are being rebuilt. This optimizes merge order so that
807 # dependencies are rebuilt/updated as soon as possible,
808 # which is needed especially when emerge is called by
809 # revdep-rebuild since dependencies may be affected by ABI
810 # breakage that has rendered them useless. Don't adjust
811 # priority here when in "empty" mode since all packages
812 # are being merged in that case.
813 priority.rebuild = True
815 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
816 slot_collision = False
818 existing_node_matches = pkg.cpv == existing_node.cpv
819 if existing_node_matches and \
820 pkg != existing_node and \
821 dep.atom is not None:
822 # Use package set for matching since it will match via
823 # PROVIDE when necessary, while match_from_list does not.
824 atom_set = InternalPackageSet(initial_atoms=[dep.atom],
826 if not atom_set.findAtomForPackage(existing_node, \
827 modified_use=self._pkg_use_enabled(existing_node)):
828 existing_node_matches = False
829 if existing_node_matches:
830 # The existing node can be reused.
832 for parent_atom in arg_atoms:
833 parent, atom = parent_atom
834 self._dynamic_config.digraph.add(existing_node, parent,
836 self._add_parent_atom(existing_node, parent_atom)
837 # If a direct circular dependency is not an unsatisfied
838 # buildtime dependency then drop it here since otherwise
839 # it can skew the merge order calculation in an unwanted
841 if existing_node != myparent or \
842 (priority.buildtime and not priority.satisfied):
843 self._dynamic_config.digraph.addnode(existing_node, myparent,
845 if dep.atom is not None and dep.parent is not None:
846 self._add_parent_atom(existing_node,
847 (dep.parent, dep.atom))
850 # A slot conflict has occurred.
851 # The existing node should not already be in
852 # runtime_pkg_mask, since that would trigger an
853 # infinite backtracking loop.
854 if self._dynamic_config._allow_backtracking and \
856 self._dynamic_config._runtime_pkg_mask:
857 if "--debug" in self._frozen_config.myopts:
859 "!!! backtracking loop detected: %s %s\n" % \
861 self._dynamic_config._runtime_pkg_mask[
862 existing_node]), noiselevel=-1)
863 elif self._dynamic_config._allow_backtracking and \
864 not self._accept_blocker_conflicts() and \
865 not self.need_restart():
867 self._add_slot_conflict(pkg)
868 if dep.atom is not None and dep.parent is not None:
869 self._add_parent_atom(pkg, (dep.parent, dep.atom))
872 for parent_atom in arg_atoms:
873 parent, atom = parent_atom
874 self._add_parent_atom(pkg, parent_atom)
875 self._process_slot_conflicts()
880 # The ordering of backtrack_data can make
881 # a difference here, because both mask actions may lead
882 # to valid, but different, solutions and the one with
883 # 'existing_node' masked is usually the better one. Because
884 # of that, we choose an order such that
885 # the backtracker will first explore the choice with
886 # existing_node masked. The backtracker reverses the
887 # order, so the order it uses is the reverse of the
888 # order shown here. See bug #339606.
889 for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
890 # For missed update messages, find out which
891 # atoms matched to_be_selected that did not
892 # match to_be_masked.
894 self._dynamic_config._parent_atoms.get(to_be_selected, set())
896 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
898 parent_atoms = conflict_atoms
900 all_parents.update(parent_atoms)
903 for parent, atom in parent_atoms:
904 i = InternalPackageSet(initial_atoms=(atom,),
906 if not i.findAtomForPackage(to_be_masked):
910 if to_be_selected >= to_be_masked:
911 # We only care about the parent atoms
912 # when they trigger a downgrade.
915 fallback_data.append((to_be_masked, parent_atoms))
918 # 'to_be_masked' does not violate any parent atom, which means
919 # there is no point in masking it.
922 backtrack_data.append((to_be_masked, parent_atoms))
924 if not backtrack_data:
925 # This shouldn't happen, but fall back to the old
926 # behavior if this gets triggered somehow.
927 backtrack_data = fallback_data
929 if len(backtrack_data) > 1:
930 # NOTE: Generally, we prefer to mask the higher
931 # version since this solves common cases in which a
932 # lower version is needed so that all dependencies
933 # will be satisfied (bug #337178). However, if
934 # existing_node happens to be installed then we
935 # mask that since this is a common case that is
936 # triggered when --update is not enabled.
937 if existing_node.installed:
939 elif pkg > existing_node:
940 backtrack_data.reverse()
942 to_be_masked = backtrack_data[-1][0]
944 self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
945 self._dynamic_config._need_restart = True
946 if "--debug" in self._frozen_config.myopts:
950 msg.append("backtracking due to slot conflict:")
951 if backtrack_data is fallback_data:
952 msg.append("!!! backtrack_data fallback")
953 msg.append(" first package: %s" % existing_node)
954 msg.append(" second package: %s" % pkg)
955 msg.append(" package to mask: %s" % to_be_masked)
956 msg.append(" slot: %s" % pkg.slot_atom)
957 msg.append(" parents: %s" % ", ".join( \
958 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
960 writemsg_level("".join("%s\n" % l for l in msg),
961 noiselevel=-1, level=logging.DEBUG)
964 # A slot collision has occurred. Sometimes this coincides
965 # with unresolvable blockers, so the slot collision will be
966 # shown later if there are no unresolvable blockers.
967 self._add_slot_conflict(pkg)
968 slot_collision = True
971 # Now add this node to the graph so that self.display()
972 # can show use flags and --tree portage.output. This node is
973 # only being partially added to the graph. It must not be
974 # allowed to interfere with the other nodes that have been
975 # added. Do not overwrite data for existing nodes in
976 # self._dynamic_config.mydbapi since that data will be used for blocker
978 # Even though the graph is now invalid, continue to process
979 # dependencies so that things like --fetchonly can still
980 # function despite collisions.
982 elif not previously_added:
983 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
984 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
985 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
986 self._dynamic_config._highest_pkg_cache.clear()
987 self._check_masks(pkg)
989 if not pkg.installed:
990 # Allow this package to satisfy old-style virtuals in case it
991 # doesn't already. Any pre-existing providers will be preferred
994 pkgsettings.setinst(pkg.cpv, pkg.metadata)
995 # For consistency, also update the global virtuals.
996 settings = self._frozen_config.roots[pkg.root].settings
998 settings.setinst(pkg.cpv, pkg.metadata)
1000 except portage.exception.InvalidDependString as e:
1001 if not pkg.installed:
1002 # should have been masked before it was selected
1006 self._dynamic_config._set_nodes.add(pkg)
1008 # Do this even when addme is False (--onlydeps) so that the
1009 # parent/child relationship is always known in case
1010 # self._show_slot_collision_notice() needs to be called later.
1011 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1012 if dep.atom is not None and dep.parent is not None:
1013 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1016 for parent_atom in arg_atoms:
1017 parent, atom = parent_atom
1018 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1019 self._add_parent_atom(pkg, parent_atom)
1021 """ This section determines whether we go deeper into dependencies or not.
1022 We want to go deeper on a few occasions:
1023 Installing package A, we need to make sure package A's deps are met.
1024 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1025 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1030 deep = self._dynamic_config.myparams.get("deep", 0)
1031 empty = "empty" in self._dynamic_config.myparams
1032 recurse = empty or deep is True or depth + 1 <= deep
1033 dep_stack = self._dynamic_config._dep_stack
1034 if "recurse" not in self._dynamic_config.myparams:
1036 elif pkg.installed and not recurse:
1037 dep_stack = self._dynamic_config._ignored_deps
1039 self._spinner_update()
1041 if not previously_added:
1042 dep_stack.append(pkg)
1045 def _check_masks(self, pkg):
1047 slot_key = (pkg.root, pkg.slot_atom)
1049 # Check for upgrades in the same slot that are
1050 # masked due to a LICENSE change in a newer
1051 # version that is not masked for any other reason.
1052 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1053 if other_pkg is not None and pkg < other_pkg:
1054 self._dynamic_config._masked_license_updates.add(other_pkg)
1056 def _add_parent_atom(self, pkg, parent_atom):
1057 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1058 if parent_atoms is None:
1059 parent_atoms = set()
1060 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1061 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
    """Record pkg as a participant in a slot collision.

    Both the new package and the package already occupying the same
    (slot_atom, root) slot are collected into _slot_collision_info so
    the conflict can be displayed later.
    """
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # NOTE(review): the line that initializes slot_nodes to a new set
        # appears to be elided from this view; the add() below relies on it.
        slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
    slot_nodes.add(pkg)
    self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Traverse pkg's dependency strings (DEPEND/RDEPEND/PDEPEND) and queue
    # the resulting dependencies for graph insertion.
    # NOTE(review): many lines of this method (initializations, try:
    # headers, else: branches, returns) are elided from this view; inline
    # NOTEs mark the gaps and indentation is best-effort.

    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = self._pkg_use_enabled(pkg)
    depth = pkg.depth + 1
    removal_action = "remove" in self._dynamic_config.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # NOTE(review): the "edepend" dict initialization and the loop header
    # over depkeys are elided here.
        edepend[k] = metadata[k]

    # With --buildpkgonly and no deep/empty traversal, runtime deps are
    # not needed to produce the binary package.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams and \
        "empty" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    if pkg.built and not removal_action:
        if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # NOTE(review): the remainder of this branch is elided; the
            # lines below belong to the opposite (default) branch, where
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    # NOTE(review): the assignment of myroot is elided from this view.
    depend_root = myroot
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            # --root-deps=rdeps: drop build-time deps entirely.
            edepend["DEPEND"] = ""

    # (root, dep string, priority, ...) tuples consumed by the loop below.
    # NOTE(review): the "deps = (" header and parts of each tuple
    # (including the ignore_blockers flags) are elided here.
        (depend_root, edepend["DEPEND"],
        self._priority(buildtime=(not pkg.built),
        optional=pkg.built),
        (myroot, edepend["RDEPEND"],
        self._priority(runtime=True),
        (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True),

    debug = "--debug" in self._frozen_config.myopts
    strict = mytype != "installed"

    for dep_root, dep_string, dep_priority, ignore_blockers in deps:
        # NOTE(review): guards (skip empty dep_string, "if debug:") are
        # elided around this tracing block.
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # NOTE(review): a try: header is elided before this call.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
                dep_string = portage.dep.use_reduce(dep_string,
                    uselist=self._pkg_use_enabled(pkg))
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

        # NOTE(review): a try: header is elided before this call.
            dep_string = list(self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)
            # should have been masked before it was selected

        dep_string = portage.dep.paren_enclose(dep_string)

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied, ignore_blockers=ignore_blockers):

    except portage.exception.AmbiguousPackageName as e:
        # An unqualified atom in the dep string matched several packages.
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
            mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey, myrepo = pkg.repo)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)

    # Mark pkg's deps as fully traversed so they are not processed again.
    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied, ignore_blockers=False):
    # Resolve one dependency string for pkg into concrete atom/package
    # choices and add each resulting Dependency to the graph.
    # NOTE(review): several lines (try: headers, "if debug:" guards,
    # break/continue/return statements) are elided from this view.
    depth = pkg.depth + 1
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"

    # Debug tracing (the "if debug:" guard is elided here).
    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)

    # NOTE(review): a try: header is elided before this call.
    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString as e:
        self._dynamic_config._masked_installed.add(pkg)
        # should have been masked before it was selected

    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi

    # Direct dependencies of pkg (selected_atoms[pkg]).
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        if ignore_blockers and atom.blocker:
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.

        mypriority = dep_priority.copy()
        if not atom.blocker:
            inst_pkgs = vardb.match_pkgs(atom)
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]

        if not self._add_dep(Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root),
            allow_unsatisfied=allow_unsatisfied):

    # Direct deps handled above; only virtual entries remain.
    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_pkg, atoms in selected_atoms.items():

        writemsg_level("Candidates: %s: %s\n" % \
            (virt_pkg.cpv, [str(x) for x in atoms]),
            noiselevel=-1, level=logging.DEBUG)

        # Just assume depth + 1 here for now, though it's not entirely
        # accurate since multilple levels of indirect virtual deps may
        # have been traversed. The _add_pkg call will reset the depth to
        # 0 if this package happens to match an argument.
        if not self._add_pkg(virt_pkg,
            Dependency(atom=Atom('=' + virt_pkg.cpv),
            depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):
            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = vardb.match_pkgs(atom)
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_pkg.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):

    writemsg_level("Exiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    overlapping packages.
    """
    # NOTE(review): loop headers, initializations (atom_pkg_map,
    # pkg_atom_map, cp_pkg_map, cp_atoms, less_than/not_less_than) and
    # several yield/continue/break lines are elided from this view.

    # Map each atom to the package selected for it.
    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg

    # With fewer than two selections there is nothing to minimize.
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():

    # Group atoms by selected package, and packages by cp.
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for cp, pkgs in cp_pkg_map.items():
        for atom in pkg_atom_map[pkg]:

        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
        for atom in pkg_atom_map[pkg1]:
            atom_pkg_graph.add(pkg1, atom)
            atom_set = InternalPackageSet(initial_atoms=(atom,),
            # Link every other package of the same cp that the atom
            # also matches, so redundancy can be detected below.
            if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
                atom_pkg_graph.add(pkg2, atom)

        # A package is redundant when every atom pointing at it is also
        # satisfied by some other selected package.
        eliminate_pkg = True
        for atom in atom_pkg_graph.parent_nodes(pkg):
            if len(atom_pkg_graph.child_nodes(atom)) < 2:
                eliminate_pkg = False
        atom_pkg_graph.remove(pkg)

        # Yield < and <= atoms first, since those are more likely to
        # cause slot conflicts, and we want those atoms to be displayed
        # in the resulting slot conflict message (see bug #291142).
        for atom in cp_atoms:
            if atom.operator in ('<', '<='):
                less_than.append(atom)
                not_less_than.append(atom)

        for atom in chain(less_than, not_less_than):
            child_pkgs = atom_pkg_graph.child_nodes(atom)
            yield (atom, child_pkgs[0])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    necessary.
    """
    # NOTE(review): the index initialization/increments, "elif"/"else"
    # headers, a try: header and the yield statements are elided from
    # this view; indentation is best-effort.
    while i < len(dep_struct):
        if isinstance(x, list):
            # Recurse into nested groups, re-yielding their plain deps.
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
            # A "||" choice and its following group are deferred as a unit.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ x, dep_struct[ i + 1 ] ] )
            x = portage.dep.Atom(x)
        except portage.exception.InvalidAtom:
            # Invalid atoms are fatal for to-be-installed packages only;
            # for installed packages they are tolerated.
            if not pkg.installed:
                raise portage.exception.InvalidDependString(
                    "invalid atom: '%s'" % x)
            # Note: Eventually this will check for PROPERTIES=virtual
            # or whatever other metadata gets implemented for this
            # purpose.
            if x.cp.startswith('virtual/'):
                self._queue_disjunction( pkg, dep_root,
                    dep_priority, [ str(x) ] )
1442 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1443 self._dynamic_config._dep_disjunctive_stack.append(
1444 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    # Re-serialize the structure so it can go through the normal
    # dependency-string path.
    dep_string = portage.dep.paren_enclose(dep_struct)
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
    # NOTE(review): the success/failure return statements are elided
    # from this view.
def _priority(self, **kwargs):
    """Construct a dependency priority of the kind appropriate for the
    current operation: UnmergeDepPriority during removal actions,
    DepPriority otherwise. Keyword arguments are forwarded unchanged.
    """
    constructor = (UnmergeDepPriority
        if "remove" in self._dynamic_config.myparams else DepPriority)
    return constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category into all matching full atoms.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # Insert a placeholder category to extract the package-name part.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    # NOTE(review): the initialization of the categories accumulator and
    # of deps, plus the final return, are elided from this view.
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            # Any db that lists cat/atom_pn makes cat a candidate.
            if db.cp_list("%s/%s" % (cat, atom_pn)):
    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat), allow_repo=True))
def _have_new_virt(self, root, atom_cp):
    # Whether atom_cp is provided by a new-style virtual package in any of
    # the filtered dep databases for this root.
    # NOTE(review): the result-variable initialization and the return
    # statement are elided from this view.
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Yield the argument atoms (with their originating args) that pkg
    # satisfies, skipping atoms better served by a new-style virtual or
    # by a visible higher version in another slot.
    # NOTE(review): several continue/break/yield lines and the
    # initializations of visible_pkgs/higher_slot are elided from this view.
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    root_config = self._frozen_config.roots[pkg.root]
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        # Old-style virtual atoms are skipped when a new-style virtual
        # provides the same cp.
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A higher version in a different slot takes precedence.
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            # PackageArgs only apply to their own exact package.
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): this view of the method is missing many interleaved
    # lines (loop headers, else: branches, early returns, accumulator
    # initializations such as args/lookup_owners/myfavorites); inline
    # NOTEs mark the most important gaps and indentation is best-effort.
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    myroot = self._frozen_config.target_root
    dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
    vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self._frozen_config.myopts

    # NOTE(review): the "for x in myfiles:" loop header and the
    # ext == ".tbz2" branch header are elided; the following handles
    # binary package files.
    ext = os.path.splitext(x)[1]
    if not os.path.exists(x):
        # Fall back to the PKGDIR locations when a bare filename is given.
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
        elif os.path.exists(
            os.path.join(pkgsettings["PKGDIR"], x)):
            x = os.path.join(pkgsettings["PKGDIR"], x)
        writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
        writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
        return 0, myfavorites
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    # The tbz2 must live at the path the bintree expects for this key.
    if os.path.realpath(x) != \
        os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
        writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
        return 0, myfavorites

    pkg = self._pkg(mykey, "binary", root_config,
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        # A raw ebuild path: derive the category/package key from its
        # location in the tree.
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.cpv_getkey(mykey)):
            writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
            writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
            countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
            raise portage.exception.PackageNotFound(
                "%s is not in a valid portage tree hierarchy or does not exist" % x)
        pkg = self._pkg(mykey, "ebuild", root_config,
            onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
            os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        # A filesystem path: owner lookup resolves it to a package below.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
        if x in ("system", "world"):
        if x.startswith(SETPREFIX):
            s = x[len(SETPREFIX):]
            # NOTE(review): the membership check against the known sets
            # is elided here.
                raise portage.exception.PackageSetNotFound(s)
            if s in depgraph_sets.sets:
            depgraph_sets.sets[s] = pset
            args.append(SetArg(arg=x, pset=pset,
                root_config=root_config))
        if not is_valid_package_atom(x, allow_repo=True):
            portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
            portage.writemsg("!!! Please check ebuild(5) for full details.\n")
            portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
        # Don't expand categories or old-style virtuals here unless
        # necessary. Expansion of old-style virtuals here causes at
        # least the following problems:
        #   1) It's more difficult to determine which set(s) an atom
        #      came from, if any.
        #   2) It takes away freedom from the resolver to choose other
        #      possible expansions when necessary.
            args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
                root_config=root_config))
        expanded_atoms = self._dep_expand(root_config, x)
        installed_cp_set = set()
        for atom in expanded_atoms:
            if vardb.cp_list(atom.cp):
                installed_cp_set.add(atom.cp)

        if len(installed_cp_set) > 1:
            # Prefer the single non-virtual cp when exactly one exists.
            non_virtual_cps = set()
            for atom_cp in installed_cp_set:
                if not atom_cp.startswith("virtual/"):
                    non_virtual_cps.add(atom_cp)
            if len(non_virtual_cps) == 1:
                installed_cp_set = non_virtual_cps

        if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
            installed_cp = next(iter(installed_cp_set))
            for atom in expanded_atoms:
                if atom.cp == installed_cp:
                    # Keep this expansion only if a non-installed match
                    # is actually available for it.
                    for pkg in self._iter_match_pkgs_any(
                        root_config, atom.without_use,
                        if not pkg.installed:
                    expanded_atoms = [atom]

        # If a non-virtual package and one or more virtual packages
        # are in expanded_atoms, use the non-virtual package.
        if len(expanded_atoms) > 1:
            number_of_virtuals = 0
            for expanded_atom in expanded_atoms:
                if expanded_atom.cp.startswith("virtual/"):
                    number_of_virtuals += 1
                    candidate = expanded_atom
            if len(expanded_atoms) - number_of_virtuals == 1:
                expanded_atoms = [ candidate ]

        if len(expanded_atoms) > 1:
            # Still ambiguous: report and bail out.
            writemsg("\n\n", noiselevel=-1)
            ambiguous_package_name(x, expanded_atoms, root_config,
                self._frozen_config.spinner, self._frozen_config.myopts)
            return False, myfavorites
        atom = expanded_atoms[0]
        null_atom = Atom(insert_category_into_atom(x, "null"))
        cat, atom_pn = portage.catsplit(null_atom.cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
            # Allow the depgraph to choose which virtual.
            atom = Atom(null_atom.replace('null/', 'virtual/', 1))
        args.append(AtomArg(arg=x, atom=atom,
            root_config=root_config))

    # Resolve queued filesystem paths to their owning packages.
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot)-1:])
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:
        portage.writemsg(("\n\n!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0], noiselevel=-1)
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
        # portage now masks packages with missing slot, but it's
        # possible that one was installed by an older version
        atom = Atom(portage.cpv_getkey(cpv))
        atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))

    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))
        self._set_args(greedy_args)

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Deduplicate favorites while accepting both atoms and set names.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]

    return self._resolve(myfavorites)
def _resolve(self, myfavorites):
    """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
    call self._creategraph to process theier deps and return
    a favorite list."""
    # NOTE(review): this view is missing interleaved lines (try: headers,
    # continue/break statements, the "missing" counter initialization);
    # inline NOTEs mark the gaps and indentation is best-effort.
    debug = "--debug" in self._frozen_config.myopts
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    myroot = self._frozen_config.target_root
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    pprovideddict = pkgsettings.pprovideddict
    virtuals = pkgsettings.getvirtuals()
    for arg in self._expand_set_args(
        self._dynamic_config._initial_arg_list,
        add_to_digraph=True):
        for atom in arg.pset.getAtoms():
            self._spinner_update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            # package.provided entries satisfy the atom without merging.
            pprovided = pprovideddict.get(atom.cp)
            if pprovided and portage.match_from_list(atom, pprovided):
                # A provided package has been specified on the command line.
                self._dynamic_config._pprovided_args.append((arg, atom))
            if isinstance(arg, PackageArg):
                if not self._add_pkg(arg.package, dep) or \
                    not self._create_graph():
                    if not self._dynamic_config._need_restart:
                        sys.stderr.write(("\n\n!!! Problem " + \
                            "resolving dependencies for %s\n") % \
                    return 0, myfavorites

            portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                (arg, atom), noiselevel=-1)
            pkg, existing_node = self._select_package(
                myroot, atom, onlydeps=onlydeps)
            # NOTE(review): the "no package found" branch header is
            # elided here; the fallback below re-checks package.provided
            # through old-style virtual expansions.
            pprovided_match = False
            for virt_choice in virtuals.get(atom.cp, []):
                expanded_atom = portage.dep.Atom(
                    atom.replace(atom.cp, virt_choice.cp, 1))
                pprovided = pprovideddict.get(expanded_atom.cp)
                portage.match_from_list(expanded_atom, pprovided):
                    # A provided package has been
                    # specified on the command line.
                    self._dynamic_config._pprovided_args.append((arg, atom))
                    pprovided_match = True

            if not (isinstance(arg, SetArg) and \
                arg.name in ("selected", "system", "world")):
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {"myparent" : arg}))
                return 0, myfavorites
            self._dynamic_config._missing_args.append((arg, atom))

            if atom.cp != pkg.cp:
                # For old-style virtuals, we need to repeat the
                # package.provided check against the selected package.
                expanded_atom = atom.replace(atom.cp, pkg.cp)
                pprovided = pprovideddict.get(pkg.cp)
                portage.match_from_list(expanded_atom, pprovided):
                    # A provided package has been
                    # specified on the command line.
                    self._dynamic_config._pprovided_args.append((arg, atom))

            if pkg.installed and "selective" not in self._dynamic_config.myparams:
                self._dynamic_config._unsatisfied_deps_for_display.append(
                    ((myroot, atom), {"myparent" : arg}))
                # Previous behavior was to bail out in this case, but
                # since the dep is satisfied by the installed package,
                # it's more friendly to continue building the graph
                # and just show a warning message. Therefore, only bail
                # out here if the atom is not from either the system or
                # world set.
                if not (isinstance(arg, SetArg) and \
                    arg.name in ("selected", "system", "world")):
                    return 0, myfavorites

            # Add the selected package to the graph as soon as possible
            # so that later dep_check() calls can use it as feedback
            # for making more consistent atom selections.
            if not self._add_pkg(pkg, dep):
                if self._dynamic_config._need_restart:
                elif isinstance(arg, SetArg):
                    sys.stderr.write(("\n\n!!! Problem resolving " + \
                        "dependencies for %s from %s\n") % \
                    sys.stderr.write(("\n\n!!! Problem resolving " + \
                        "dependencies for %s\n") % atom)
                return 0, myfavorites

    except portage.exception.MissingSignature as e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature as e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit as e:
        raise # Needed else can't exit
    except Exception as e:
        writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
        writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    # Count merges that would need a missing binary under --usepkgonly.
    if "--usepkgonly" in self._frozen_config.myopts:
        for xs in self._dynamic_config.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                writemsg("\n", noiselevel=-1)
                writemsg("Missing binary for: %s\n" % xs[2], noiselevel=-1)

    except self._unknown_internal_error:
        return False, myfavorites

    if set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_unstable_keywords) or \
        set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_use_config_changes) or \
        set(self._dynamic_config.digraph).intersection( \
        self._dynamic_config._needed_license_changes) :
        #We failed if the user needs to change the configuration
        self._dynamic_config._success_without_autounmask = True
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
# NOTE(review): this excerpt is garbled — the original file's line numbers are
# fused into each line, indentation is stripped, and gaps in the embedded
# numbering mark elided lines. Code tokens are kept verbatim below; only
# review comments are added. Recover the pristine file before editing logic.
1962 def _set_args(self, args):
# Purpose (per the docstring text below, whose """ delimiters were elided):
# rebuild the "__non_set_args__" package set from argument atoms/packages.
1964 Create the "__non_set_args__" package set from atoms and packages given as
1965 arguments. This method can be called multiple times if necessary.
1966 The package selection cache is automatically invalidated, since
1967 arguments influence package selections.
# NOTE(review): numbering jumps 1967 -> 1972; the docstring close and,
# presumably, the `set_atoms`/`non_set_atoms` dict initializations are elided
# here — both dicts are populated per-root just below. TODO confirm upstream.
1972 for root in self._dynamic_config.sets:
1973 depgraph_sets = self._dynamic_config.sets[root]
1974 depgraph_sets.sets.setdefault('__non_set_args__',
1975 InternalPackageSet(allow_repo=True)).clear()
1976 depgraph_sets.atoms.clear()
1977 depgraph_sets.atom_arg_map.clear()
1978 set_atoms[root] = []
1979 non_set_atoms[root] = []
1981 # We don't add set args to the digraph here since that
1982 # happens at a later stage and we don't want to make
1983 # any state changes here that aren't reversed by a
1984 # another call to this method.
1985 for arg in self._expand_set_args(args, add_to_digraph=False):
1986 atom_arg_map = self._dynamic_config.sets[
1987 arg.root_config.root].atom_arg_map
1988 if isinstance(arg, SetArg):
1989 atom_group = set_atoms[arg.root_config.root]
# NOTE(review): line 1990 (the `else:` for non-SetArg arguments) is elided.
1991 atom_group = non_set_atoms[arg.root_config.root]
1993 for atom in arg.pset.getAtoms():
1994 atom_group.append(atom)
1995 atom_key = (atom, arg.root_config.root)
1996 refs = atom_arg_map.get(atom_key)
# NOTE(review): lines 1997-1998 and 2000-2002 elided — presumably the
# "refs is None -> create list" and "append arg if missing" handling.
1999 atom_arg_map[atom_key] = refs
2003 for root in self._dynamic_config.sets:
2004 depgraph_sets = self._dynamic_config.sets[root]
2005 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2006 non_set_atoms.get(root, [])))
2007 depgraph_sets.sets['__non_set_args__'].update(
2008 non_set_atoms.get(root, []))
2010 # Invalidate the package selection cache, since
2011 # arguments influence package selections.
2012 self._dynamic_config._highest_pkg_cache.clear()
2013 for trees in self._dynamic_config._filtered_trees.values():
2014 trees["porttree"].dbapi._clear_cache()
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps in the numbering mark elided lines. Tokens kept verbatim; comments only.
2016 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2018 Return a list of slot atoms corresponding to installed slots that
2019 differ from the slot of the highest visible match. When
2020 blocker_lookahead is True, slot atoms that would trigger a blocker
2021 conflict are automatically discarded, potentially allowing automatic
2022 uninstallation of older slots when appropriate.
2024 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2025 if highest_pkg is None:
# NOTE(review): line 2026 elided — presumably an early `return []`.
2027 vardb = root_config.trees["vartree"].dbapi
# NOTE(review): line 2028 elided — presumably `slots = set()`, which the
# loop below populates. TODO confirm upstream.
2029 for cpv in vardb.match(atom):
2030 # don't mix new virtuals with old virtuals
2031 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2032 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2034 slots.add(highest_pkg.metadata["SLOT"])
# NOTE(review): lines 2035-2037 and 2039-2040 elided — presumably the
# single-slot early return, `greedy_pkgs = []` and the `for slot in slots:`
# loop header that the statements below belong to.
2038 slots.remove(highest_pkg.metadata["SLOT"])
2041 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2042 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2043 if pkg is not None and \
2044 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2045 greedy_pkgs.append(pkg)
2048 if not blocker_lookahead:
2049 return [pkg.slot_atom for pkg in greedy_pkgs]
# NOTE(review): blocker-lookahead path — builds a per-package set of blocker
# atoms from DEPEND/PDEPEND/RDEPEND; `blockers`/`blocker_atoms` inits and the
# `try:` around _select_atoms appear elided (gaps 2050-2051, 2055, 2060-2061).
2052 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2053 for pkg in greedy_pkgs + [highest_pkg]:
2054 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2056 selected_atoms = self._select_atoms(
2057 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2058 parent=pkg, strict=True)
2059 except portage.exception.InvalidDependString:
2062 for atoms in selected_atoms.values():
2063 blocker_atoms.extend(x for x in atoms if x.blocker)
2064 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2066 if highest_pkg not in blockers:
2069 # filter packages with invalid deps
2070 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2072 # filter packages that conflict with highest_pkg
2073 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2074 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2075 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2080 # If two packages conflict, discard the lower version.
2081 discard_pkgs = set()
2082 greedy_pkgs.sort(reverse=True)
2083 for i in range(len(greedy_pkgs) - 1):
2084 pkg1 = greedy_pkgs[i]
2085 if pkg1 in discard_pkgs:
# NOTE(review): `continue` lines (2086, 2090, 2093) appear elided in this
# pairwise conflict scan.
2087 for j in range(i + 1, len(greedy_pkgs)):
2088 pkg2 = greedy_pkgs[j]
2089 if pkg2 in discard_pkgs:
2091 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2092 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2094 discard_pkgs.add(pkg2)
2096 return [pkg.slot_atom for pkg in greedy_pkgs \
2097 if pkg not in discard_pkgs]
2099 def _select_atoms_from_graph(self, *pargs, **kwargs):
2101 Prefer atoms matching packages that have already been
2102 added to the graph or those that are installed and have
2103 not been scheduled for replacement.
2105 kwargs["trees"] = self._dynamic_config._graph_trees
2106 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines. Tokens kept verbatim; comments only.
2108 def _select_atoms_highest_available(self, root, depstring,
2109 myuse=None, parent=None, strict=True, trees=None, priority=None):
2110 """This will raise InvalidDependString if necessary. If trees is
2111 None then self._dynamic_config._filtered_trees is used."""
2113 pkgsettings = self._frozen_config.pkgsettings[root]
# NOTE(review): line 2114 elided — presumably `if trees is None:` guarding
# the fallback assignment below (matches the docstring). TODO confirm.
2115 trees = self._dynamic_config._filtered_trees
2116 mytrees = trees[root]
2117 atom_graph = digraph()
2119 # Temporarily disable autounmask so that || preferences
2120 # account for masking and USE settings.
2121 _autounmask_backup = self._dynamic_config._autounmask
2122 self._dynamic_config._autounmask = False
2123 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
# NOTE(review): line 2124 elided — presumably `try:` matching the restore
# block below (line 2133, also elided, would be `finally:`), which undoes
# the temporary mutations of mytrees/autounmask around dep_check.
2125 if parent is not None:
2126 trees[root]["parent"] = parent
2127 trees[root]["atom_graph"] = atom_graph
2128 if priority is not None:
2129 trees[root]["priority"] = priority
2130 mycheck = portage.dep_check(depstring, None,
2131 pkgsettings, myuse=myuse,
2132 myroot=root, trees=trees)
2134 self._dynamic_config._autounmask = _autounmask_backup
2135 del mytrees["pkg_use_enabled"]
2136 if parent is not None:
2137 trees[root].pop("parent")
2138 trees[root].pop("atom_graph")
2139 if priority is not None:
2140 trees[root].pop("priority")
# NOTE(review): line 2141 elided — presumably `if not mycheck[0]:` so the
# raise below only fires when dep_check reports failure.
2142 raise portage.exception.InvalidDependString(mycheck[1])
# NOTE(review): lines 2143/2145 elided — presumably the `if parent is None:`
# / `else:` split between the flat result and the per-parent atom mapping.
2144 selected_atoms = mycheck[1]
2146 chosen_atoms = frozenset(mycheck[1])
2147 selected_atoms = {parent : []}
2148 for node in atom_graph:
2149 if isinstance(node, Atom):
# NOTE(review): gap 2150-2153 — the Atom-node handling (append to the
# parent's list / continue) is elided.
2154 pkg, virt_atom = node
2155 if virt_atom not in chosen_atoms:
2157 if not portage.match_from_list(virt_atom, [pkg]):
2158 # Typically this means that the atom
2159 # specifies USE deps that are unsatisfied
2160 # by the selected package. The caller will
2161 # record this as an unsatisfied dependency
2165 selected_atoms[pkg] = [atom for atom in \
2166 atom_graph.child_nodes(node) if atom in chosen_atoms]
2168 return selected_atoms
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps in the embedded numbering mark elided lines (this very large method has
# dozens, including `continue`/`else:`/`try:` lines). Tokens kept verbatim;
# only review comments are added. Recover the pristine file before editing.
2170 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2171 check_backtrack=False):
2173 When check_backtrack=True, no output is produced and
2174 the method either returns or raises _backtrack_mask if
2175 a matching package has been masked by backtracking.
2177 backtrack_mask = False
2178 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
# NOTE(review): line 2179 elided — presumably the `allow_repo=True)` closer.
2180 xinfo = '"%s"' % atom.unevaluated_atom
2183 if isinstance(myparent, AtomArg):
2184 xinfo = _unicode_decode('"%s"') % (myparent,)
2185 # Discard null/ from failed cpv_expand category expansion.
2186 xinfo = xinfo.replace("null/", "")
2187 masked_packages = []
2189 masked_pkg_instances = set()
2190 missing_licenses = []
2191 have_eapi_mask = False
2192 pkgsettings = self._frozen_config.pkgsettings[root]
2193 root_config = self._frozen_config.roots[root]
2194 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2195 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Scan every db type for candidate matches and collect mask reasons per cpv.
2196 for db, pkg_type, built, installed, db_keys in dbs:
2200 if hasattr(db, "xmatch"):
2201 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2203 cpv_list = db.match(atom.without_use)
2205 if atom.repo is None and hasattr(db, "getRepositories"):
2206 repo_list = db.getRepositories()
2208 repo_list = [atom.repo]
2212 for cpv in cpv_list:
2213 for repo in repo_list:
2214 if not db.cpv_exists(cpv, myrepo=repo):
2217 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2218 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2220 if metadata is not None:
2222 repo = metadata.get('repository')
2223 pkg = self._pkg(cpv, pkg_type, root_config,
2224 installed=installed, myrepo=repo)
2225 if not atom_set.findAtomForPackage(pkg,
2226 modified_use=self._pkg_use_enabled(pkg)):
2228 # pkg.metadata contains calculated USE for ebuilds,
2229 # required later for getMissingLicenses.
2230 metadata = pkg.metadata
2231 if pkg in self._dynamic_config._runtime_pkg_mask:
2232 backtrack_reasons = \
2233 self._dynamic_config._runtime_pkg_mask[pkg]
2234 mreasons.append('backtracking: %s' % \
2235 ', '.join(sorted(backtrack_reasons)))
2236 backtrack_mask = True
2237 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2238 modified_use=self._pkg_use_enabled(pkg)):
2239 mreasons = ["exclude option"]
2241 masked_pkg_instances.add(pkg)
2242 if atom.unevaluated_atom.use:
# NOTE(review): gap 2243 — presumably `try:` around the USE-dep validity
# check, matched by the InvalidAtom message at 2250-2252 below.
2244 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2245 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2246 missing_use.append(pkg)
2250 writemsg("violated_conditionals raised " + \
2251 "InvalidAtom: '%s' parent: %s" % \
2252 (atom, myparent), noiselevel=-1)
2254 if pkg.built and not mreasons:
2255 mreasons = ["use flag configuration mismatch"]
2256 masked_packages.append(
2257 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
# NOTE(review): gap 2258-2260 — presumably the check_backtrack early-return
# path that raises when a backtrack mask was seen.
2261 raise self._backtrack_mask()
2265 missing_use_reasons = []
2266 missing_iuse_reasons = []
2267 for pkg in missing_use:
2268 use = self._pkg_use_enabled(pkg)
2270 #Use the unevaluated atom here, because some flags might have gone
2271 #lost during evaluation.
2272 required_flags = atom.unevaluated_atom.use.required
2273 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2277 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2278 missing_iuse_reasons.append((pkg, mreasons))
2280 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2281 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2283 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
2284 pkgsettings.setcpv(pkg)
2285 untouchable_flags = \
2286 frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
2287 if untouchable_flags.intersection(
2288 chain(need_enable, need_disable)):
2291 required_use = pkg.metadata["REQUIRED_USE"]
2292 required_use_warning = ""
2294 old_use = self._pkg_use_enabled(pkg)
2295 new_use = set(self._pkg_use_enabled(pkg))
2296 for flag in need_enable:
# NOTE(review): line 2297 elided — presumably `new_use.add(flag)`, the
# counterpart to the discard below.
2298 for flag in need_disable:
2299 new_use.discard(flag)
2300 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2301 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2302 required_use_warning = ", this change violates use flag constraints " + \
2303 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2305 if need_enable or need_disable:
2307 changes.extend(colorize("red", "+" + x) \
2308 for x in need_enable)
2309 changes.extend(colorize("blue", "-" + x) \
2310 for x in need_disable)
2311 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2312 missing_use_reasons.append((pkg, mreasons))
2314 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2315 # Lets see if the violated use deps are conditional.
2316 # If so, suggest to change them on the parent.
2318 # If the child package is masked then a change to
2319 # parent USE is not a valid solution (a normal mask
2320 # message should be displayed instead).
2321 if pkg in masked_pkg_instances:
2325 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2326 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2327 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2328 #all violated use deps are conditional
2330 conditional = violated_atom.use.conditional
2331 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2332 conditional.enabled, conditional.disabled))
2334 pkgsettings = self._frozen_config.pkgsettings[myparent.root]
2335 pkgsettings.setcpv(myparent)
2336 untouchable_flags = \
2337 frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
2338 if untouchable_flags.intersection(involved_flags):
2341 required_use = myparent.metadata["REQUIRED_USE"]
2342 required_use_warning = ""
2344 old_use = self._pkg_use_enabled(myparent)
2345 new_use = set(self._pkg_use_enabled(myparent))
2346 for flag in involved_flags:
2348 new_use.discard(flag)
2351 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2352 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2353 required_use_warning = ", this change violates use flag constraints " + \
2354 "defined by %s: '%s'" % (myparent.cpv, \
2355 human_readable_required_use(required_use))
2357 for flag in involved_flags:
2358 if flag in self._pkg_use_enabled(myparent):
2359 changes.append(colorize("blue", "-" + flag))
2361 changes.append(colorize("red", "+" + flag))
2362 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2363 if (myparent, mreasons) not in missing_use_reasons:
2364 missing_use_reasons.append((myparent, mreasons))
2366 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2367 in missing_use_reasons if pkg not in masked_pkg_instances]
2369 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2370 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2372 show_missing_use = False
2373 if unmasked_use_reasons:
2374 # Only show the latest version.
2375 show_missing_use = []
2377 parent_reason = None
2378 for pkg, mreasons in unmasked_use_reasons:
2380 if parent_reason is None:
2381 #This happens if a use change on the parent
2382 #leads to a satisfied conditional use dep.
2383 parent_reason = (pkg, mreasons)
2384 elif pkg_reason is None:
2385 #Don't rely on the first pkg in unmasked_use_reasons,
2386 #being the highest version of the dependency.
2387 pkg_reason = (pkg, mreasons)
2389 show_missing_use.append(pkg_reason)
2391 show_missing_use.append(parent_reason)
2393 elif unmasked_iuse_reasons:
2394 masked_with_iuse = False
2395 for pkg in masked_pkg_instances:
2396 #Use atom.unevaluated here, because some flags might have gone
2397 #lost during evaluation.
2398 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2399 # Package(s) with required IUSE are masked,
2400 # so display a normal masking message.
2401 masked_with_iuse = True
2403 if not masked_with_iuse:
2404 show_missing_use = unmasked_iuse_reasons
# Output section: three mutually exclusive messages — USE-change suggestion,
# all-candidates-masked listing, or no-ebuilds-at-all.
2408 if show_missing_use:
2409 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2410 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
2411 for pkg, mreasons in show_missing_use:
2412 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
2414 elif masked_packages:
2415 writemsg_stdout("\n!!! " + \
2416 colorize("BAD", "All ebuilds that could satisfy ") + \
2417 colorize("INFORM", xinfo) + \
2418 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
2419 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
2420 have_eapi_mask = show_masked_packages(masked_packages)
# NOTE(review): line 2421 elided — presumably `if have_eapi_mask:` guarding
# the EAPI upgrade hint below.
2422 writemsg_stdout("\n", noiselevel=-1)
2423 msg = ("The current version of portage supports " + \
2424 "EAPI '%s'. You must upgrade to a newer version" + \
2425 " of portage before EAPI masked packages can" + \
2426 " be installed.") % portage.const.EAPI
2427 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
2428 writemsg_stdout("\n", noiselevel=-1)
2431 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2433 # Show parent nodes and the argument that pulled them in.
2434 traversed_nodes = set()
2436 if isinstance(myparent, AtomArg):
2437 # It's redundant to show parent for AtomArg since
2438 # it's the same as 'xinfo' displayed above.
# NOTE(review): gap 2439-2442 — presumably `node = None` for the AtomArg
# case, `else: node = myparent`, and `msg = []` initialization.
2443 while node is not None:
2444 traversed_nodes.add(node)
2445 if isinstance(node, DependencyArg):
2446 msg.append('(dependency required by "%s")' % \
2447 colorize('INFORM', _unicode_decode("%s") % (node,)))
2449 msg.append('(dependency required by "%s" [%s])' % \
2450 (colorize('INFORM', _unicode_decode("%s") % \
2451 (node.cpv,)), node.type_name))
2453 if node not in self._dynamic_config.digraph:
2454 # The parent is not in the graph due to backtracking.
2457 # When traversing to parents, prefer arguments over packages
2458 # since arguments are root nodes. Never traverse the same
2459 # package twice, in order to prevent an infinite loop.
2460 selected_parent = None
2461 for parent in self._dynamic_config.digraph.parent_nodes(node):
2462 if parent in traversed_nodes:
2464 if isinstance(parent, DependencyArg):
2465 if self._dynamic_config.digraph.parent_nodes(parent):
2466 selected_parent = parent
2468 msg.append('(dependency required by "%s" [argument])' % \
2469 colorize('INFORM', _unicode_decode("%s") % (parent,)))
2470 selected_parent = None
2473 selected_parent = parent
2474 node = selected_parent
2476 writemsg_stdout("\n".join(msg), noiselevel=-1)
2477 writemsg_stdout("\n", noiselevel=-1)
2481 writemsg_stdout("\n", noiselevel=-1)
2483 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
2484 for db, pkg_type, built, installed, db_keys in \
2485 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2486 for pkg in self._iter_match_pkgs(root_config,
2487 pkg_type, atom, onlydeps=onlydeps):
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines (including `else:`/`continue`/`yield` statements and
# the docstring delimiters). Tokens kept verbatim; comments only.
2490 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
2492 Iterate over Package instances of pkg_type matching the given atom.
2493 This does not check visibility and it also does not match USE for
2494 unbuilt ebuilds since USE are lazily calculated after visibility
2495 checks (to avoid the expense when possible).
2498 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
2500 if hasattr(db, "xmatch"):
2501 # For portdbapi we match only against the cpv, in order
2502 # to bypass unecessary cache access for things like IUSE
2503 # and SLOT. Later, we cache the metadata in a Package
2504 # instance, and use that for further matching. This
2505 # optimization is especially relevant since
2506 # pordbapi.aux_get() does not cache calls that have
2507 # myrepo or mytree arguments.
2508 cpv_list = db.xmatch("match-all-cpv-only", atom)
# NOTE(review): line 2509 elided — presumably the `else:` for dbs without
# xmatch, taking the plain match below.
2510 cpv_list = db.match(atom)
2512 # USE=multislot can make an installed package appear as if
2513 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2514 # won't do any good as long as USE=multislot is enabled since
2515 # the newly built package still won't have the expected slot.
2516 # Therefore, assume that such SLOT dependencies are already
2517 # satisfied rather than forcing a rebuild.
2518 installed = pkg_type == 'installed'
2519 if installed and not cpv_list and atom.slot:
2520 for cpv in db.match(atom.cp):
2521 slot_available = False
2522 for other_db, other_type, other_built, \
2523 other_installed, other_keys in \
2524 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
# NOTE(review): gap 2525-2526 — presumably the slot comparison condition
# (`if atom.slot == ...`) whose right-hand side continues below.
2527 other_db.aux_get(cpv, ["SLOT"])[0]:
2528 slot_available = True
2532 if not slot_available:
# NOTE(review): line 2533 elided — presumably `continue`.
2534 inst_pkg = self._pkg(cpv, "installed",
2535 root_config, installed=installed, myrepo = atom.repo)
2536 # Remove the slot from the atom and verify that
2537 # the package matches the resulting atom.
2538 if portage.match_from_list(
2539 atom.without_slot, [inst_pkg]):
# NOTE(review): gap 2540-2543 — presumably `yield inst_pkg` and `return`,
# ending the multislot fallback path.
2544 atom_set = InternalPackageSet(initial_atoms=(atom,),
# NOTE(review): line 2545 elided — presumably `allow_repo=True)`.
2546 if atom.repo is None and hasattr(db, "getRepositories"):
2547 repo_list = db.getRepositories()
2549 repo_list = [atom.repo]
2553 for cpv in cpv_list:
2554 for repo in repo_list:
# NOTE(review): lines 2555-2556 elided — presumably `try:` matching the
# PackageNotFound handler below.
2557 pkg = self._pkg(cpv, pkg_type, root_config,
2558 installed=installed, onlydeps=onlydeps, myrepo=repo)
2559 except portage.exception.PackageNotFound:
2562 # A cpv can be returned from dbapi.match() as an
2563 # old-style virtual match even in cases when the
2564 # package does not actually PROVIDE the virtual.
2565 # Filter out any such false matches here.
2567 # Make sure that cpv from the current repo satisfies the atom.
2568 # This might not be the case if there are several repos with
2569 # the same cpv, but different metadata keys, like SLOT.
2570 # Also, for portdbapi, parts of the match that require
2571 # metadata access are deferred until we have cached the
2572 # metadata in a Package instance.
2573 if not atom_set.findAtomForPackage(pkg,
2574 modified_use=self._pkg_use_enabled(pkg)):
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines. Tokens kept verbatim; comments only.
# Memoizing front-end: consults _highest_pkg_cache before delegating to
# _select_pkg_highest_available_imp, and records visible packages.
2578 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2579 cache_key = (root, atom, onlydeps)
2580 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
# NOTE(review): gap 2581-2582 — presumably `if ret is not None:` and the
# `pkg, existing = ret` unpack that the cache-hit branch below relies on.
2583 if pkg and not existing:
2584 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2585 if existing and existing == pkg:
2586 # Update the cache to reflect that the
2587 # package has been added to the graph.
# NOTE(review): line 2588 elided — presumably re-building `ret` before
# storing it back into the cache below.
2589 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# NOTE(review): line 2590 elided — presumably `return ret` for cache hits.
2591 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2592 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# NOTE(review): gap 2593-2594 — presumably `pkg, existing = ret` and a
# `pkg is not None` guard for the visibility injection below.
2595 settings = pkg.root_config.settings
2596 if self._pkg_visibility_check(pkg) and not (pkg.installed and \
2597 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2598 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines. Tokens kept verbatim; comments only.
2601 def _want_installed_pkg(self, pkg):
2603 Given an installed package returned from select_pkg, return
2604 True if the user has not explicitly requested for this package
2605 to be replaced (typically via an atom on the command line).
2607 if "selective" not in self._dynamic_config.myparams and \
2608 pkg.root == self._frozen_config.target_root:
# NOTE(review): line 2609 elided — presumably `try:` matching the handlers
# below; probing whether any argument atom matches this package.
2610 next(self._iter_atoms_for_pkg(pkg))
2611 except StopIteration:
# NOTE(review): lines 2612 and 2614+ elided — presumably `pass` bodies for
# the handlers and the method's final `return True`/`return False` logic.
2613 except portage.exception.InvalidDependString:
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines. Tokens kept verbatim; comments only.
# Autounmask-aware selection: first tries the normal selection, then, if
# enabled, retries while allowing USE changes and then keyword/license
# changes, falling back to the default selection if a restart is pending.
2619 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2620 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2622 default_selection = (pkg, existing)
2624 if self._dynamic_config._autounmask is True:
2625 if pkg is not None and \
# NOTE(review): line 2626 elided — presumably the `pkg.installed and`
# conjunct making this condition "installed but not wanted".
2627 not self._want_installed_pkg(pkg):
# NOTE(review): gap 2628-2634 — presumably resetting pkg to None and the
# assignment target `pkg, existing = \` for the retry call below.
2630 for only_use_changes in True, False:
2635 self._wrapped_select_pkg_highest_available_imp(
2636 root, atom, onlydeps=onlydeps,
2637 allow_use_changes=True,
2638 allow_unstable_keywords=(not only_use_changes),
2639 allow_license_changes=(not only_use_changes))
2641 if pkg is not None and \
2643 not self._want_installed_pkg(pkg):
# NOTE(review): gaps 2642, 2644-2649 — presumably `pkg.installed and`,
# `pkg = None` and the loop-break on a pending restart.
2646 if self._dynamic_config._need_restart:
2650 # This ensures that we can fall back to an installed package
2651 # that may have been rejected in the autounmask path above.
2652 return default_selection
2654 return pkg, existing
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines (all of this method's `return True`/`return False`
# statements appear elided). Tokens kept verbatim; comments only.
# Checks package visibility, optionally recording autounmask changes
# (unstable keywords, license acceptance) in the backtrack infos.
2656 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False):
# NOTE(review): gap 2657-2663 — presumably the `pkg.visible` fast path and
# the early return when --autounmask is disabled (guard below).
2661 if self._frozen_config.myopts.get('--autounmask', 'n') is not True:
2664 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
2665 root_config = self._frozen_config.roots[pkg.root]
2666 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
2668 masked_by_unstable_keywords = False
2669 missing_licenses = None
2670 masked_by_something_else = False
2672 for reason in mreasons:
2673 hint = reason.unmask_hint
# NOTE(review): gap 2674-2675 — presumably `if hint is None:` preceding the
# assignment below.
2676 masked_by_something_else = True
2677 elif hint.key == "unstable keyword":
2678 masked_by_unstable_keywords = True
2679 elif hint.key == "license":
2680 missing_licenses = hint.value
# NOTE(review): line 2681 elided — presumably `else:` for unknown hint keys.
2682 masked_by_something_else = True
2684 if masked_by_something_else:
# NOTE(review): line 2685-2686 elided — presumably `return False`.
2687 if pkg in self._dynamic_config._needed_unstable_keywords:
2688 #If the package is already keyworded, remove the mask.
2689 masked_by_unstable_keywords = False
2691 if missing_licenses:
2692 #If the needed licenses are already unmasked, remove the mask.
2693 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
2695 if not (masked_by_unstable_keywords or missing_licenses):
2696 #Package has already been unmasked.
2699 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
2700 (missing_licenses and not allow_license_changes):
2701 #We are not allowed to do the needed changes.
2704 if masked_by_unstable_keywords:
2705 self._dynamic_config._needed_unstable_keywords.add(pkg)
2706 backtrack_infos = self._dynamic_config._backtrack_infos
2707 backtrack_infos.setdefault("config", {})
2708 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
2709 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
2712 if missing_licenses:
2713 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
2714 backtrack_infos = self._dynamic_config._backtrack_infos
2715 backtrack_infos.setdefault("config", {})
2716 backtrack_infos["config"].setdefault("needed_license_changes", set())
2717 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
# NOTE(review): garbled excerpt — line numbers fused in, indentation stripped,
# gaps mark elided lines (including the `new_use` initialization and the
# final `return new_use`). Tokens kept verbatim; comments only.
2721 def _pkg_use_enabled(self, pkg, target_use=None):
2723 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
2724 If target_use is given, the need changes are computed to make the package useable.
2725 Example: target_use = { "foo": True, "bar": False }
2726 The flags target_use must be in the pkg's IUSE.
2728 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
2730 if target_use is None:
2731 if needed_use_config_change is None:
2732 return pkg.use.enabled
# NOTE(review): line 2733 elided — presumably `else:`; the recorded changed
# USE set is returned below.
2734 return needed_use_config_change[0]
2736 if needed_use_config_change is not None:
2737 old_use = needed_use_config_change[0]
# NOTE(review): lines 2738/2741 elided — presumably `new_use = set(...)` for
# each branch, plus the `else:` taking the pristine pkg.use.enabled below.
2739 old_changes = needed_use_config_change[1]
2740 new_changes = old_changes.copy()
2742 old_use = pkg.use.enabled
# NOTE(review): gap 2743-2746 — presumably `new_use = set()` and
# `new_changes = {}` initialization for the no-previous-changes case.
2747 for flag, state in target_use.items():
# NOTE(review): gaps 2748/2751/2753-2755/2757 — presumably the `if state:`
# / `else:` structure around enabling vs disabling each flag, with
# `new_use.add(flag)` / `new_use.discard(flag)` counterparts.
2749 if flag not in old_use:
2750 if new_changes.get(flag) == False:
2752 new_changes[flag] = True
2756 if new_changes.get(flag) == True:
2758 new_changes[flag] = False
2759 new_use.update(old_use.difference(target_use))
# Local helper: decide whether a computed USE change requires restarting
# the dependency calculation (it affects deps or parent atom USE deps).
2761 def want_restart_for_use_change(pkg, new_use):
2762 if pkg not in self._dynamic_config.digraph.nodes:
# NOTE(review): lines 2763-2764 elided — presumably `return False`.
2765 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
2766 dep = pkg.metadata[key]
2767 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2768 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2770 if old_val != new_val:
# NOTE(review): lines 2771-2772 elided — presumably `return True`.
2773 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
2774 if not parent_atoms:
2777 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
2778 for ppkg, atom in parent_atoms:
2779 if not atom.use or \
2780 not atom.use.required.intersection(changes):
# NOTE(review): gap 2781-2786 — presumably `continue`/`return True` closing
# the helper, then fall-through `return False`.
2787 if new_changes != old_changes:
2788 #Don't do the change if it violates REQUIRED_USE.
2789 required_use = pkg.metadata["REQUIRED_USE"]
2790 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2791 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# NOTE(review): lines 2792-2793 elided — presumably returning old_use
# unchanged when the change would violate REQUIRED_USE.
2794 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
2795 backtrack_infos = self._dynamic_config._backtrack_infos
2796 backtrack_infos.setdefault("config", {})
2797 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
2798 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
2799 if want_restart_for_use_change(pkg, new_use):
2800 self._dynamic_config._need_restart = True
2803 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
2804 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False):
2805 root_config = self._frozen_config.roots[root]
2806 pkgsettings = self._frozen_config.pkgsettings[root]
2807 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2808 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2809 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2810 # List of acceptable packages, ordered by type preference.
2811 matched_packages = []
2812 matched_pkgs_ignore_use = []
2813 highest_version = None
2814 if not isinstance(atom, portage.dep.Atom):
2815 atom = portage.dep.Atom(atom)
2817 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
2818 existing_node = None
2820 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
2821 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2822 empty = "empty" in self._dynamic_config.myparams
2823 selective = "selective" in self._dynamic_config.myparams
2825 noreplace = "--noreplace" in self._frozen_config.myopts
2826 avoid_update = "--update" not in self._frozen_config.myopts
2827 dont_miss_updates = "--update" in self._frozen_config.myopts
2828 use_ebuild_visibility = self._frozen_config.myopts.get(
2829 '--use-ebuild-visibility', 'n') != 'n'
2830 # Behavior of the "selective" parameter depends on
2831 # whether or not a package matches an argument atom.
2832 # If an installed package provides an old-style
2833 # virtual that is no longer provided by an available
2834 # package, the installed package may match an argument
2835 # atom even though none of the available packages do.
2836 # Therefore, "selective" logic does not consider
2837 # whether or not an installed package matches an
2838 # argument atom. It only considers whether or not
2839 # available packages match argument atoms, which is
2840 # represented by the found_available_arg flag.
2841 found_available_arg = False
2842 packages_with_invalid_use_config = []
2843 for find_existing_node in True, False:
2846 for db, pkg_type, built, installed, db_keys in dbs:
2849 if installed and not find_existing_node:
2850 want_reinstall = reinstall or empty or \
2851 (found_available_arg and not selective)
2852 if want_reinstall and matched_packages:
2855 # Ignore USE deps for the initial match since we want to
2856 # ensure that updates aren't missed solely due to the user's
2857 # USE configuration.
2858 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
2860 if pkg in self._dynamic_config._runtime_pkg_mask:
2861 # The package has been masked by the backtracking logic
2864 if not pkg.installed and \
2865 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2866 modified_use=self._pkg_use_enabled(pkg)):
2869 if dont_miss_updates:
2870 # Check if a higher version was rejected due to user
2871 # USE configuration. The packages_with_invalid_use_config
2872 # list only contains unbuilt ebuilds since USE can't
2873 # be changed for built packages.
2874 higher_version_rejected = False
2875 for rejected in packages_with_invalid_use_config:
2876 if rejected.cp != pkg.cp:
2879 higher_version_rejected = True
2881 if higher_version_rejected:
2885 # Make --noreplace take precedence over --newuse.
2886 if not pkg.installed and noreplace and \
2887 cpv in vardb.match(atom):
2888 inst_pkg = self._pkg(pkg.cpv, "installed",
2889 root_config, installed=True)
2890 if inst_pkg.visible:
2891 # If the installed version is masked, it may
2892 # be necessary to look at lower versions,
2893 # in case there is a visible downgrade.
2895 reinstall_for_flags = None
2897 if not pkg.installed or \
2898 (matched_packages and not avoid_update):
2899 # Only enforce visibility on installed packages
2900 # if there is at least one other visible package
2901 # available. By filtering installed masked packages
2902 # here, packages that have been masked since they
2903 # were installed can be automatically downgraded
2904 # to an unmasked version.
2906 if not self._pkg_visibility_check(pkg, \
2907 allow_unstable_keywords=allow_unstable_keywords,
2908 allow_license_changes=allow_license_changes):
2911 # Enable upgrade or downgrade to a version
2912 # with visible KEYWORDS when the installed
2913 # version is masked by KEYWORDS, but never
2914 # reinstall the same exact version only due
2915 # to a KEYWORDS mask. See bug #252167.
2916 if matched_packages:
2918 different_version = None
2919 for avail_pkg in matched_packages:
2920 if not portage.dep.cpvequal(
2921 pkg.cpv, avail_pkg.cpv):
2922 different_version = avail_pkg
2924 if different_version is not None:
2925 # If the ebuild no longer exists or it's
2926 # keywords have been dropped, reject built
2927 # instances (installed or binary).
2928 # If --usepkgonly is enabled, assume that
2929 # the ebuild status should be ignored.
2930 if not use_ebuild_visibility and usepkgonly:
2932 pkgsettings._getMissingKeywords(
2933 pkg.cpv, pkg.metadata):
2938 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
2939 except portage.exception.PackageNotFound:
2942 if not self._pkg_visibility_check(pkg_eb, \
2943 allow_unstable_keywords=allow_unstable_keywords,
2944 allow_license_changes=allow_license_changes):
2947 # Calculation of USE for unbuilt ebuilds is relatively
2948 # expensive, so it is only performed lazily, after the
2949 # above visibility checks are complete.
2952 if root == self._frozen_config.target_root:
2954 myarg = next(self._iter_atoms_for_pkg(pkg))
2955 except StopIteration:
2957 except portage.exception.InvalidDependString:
2959 # masked by corruption
2961 if not installed and myarg:
2962 found_available_arg = True
2965 if pkg.iuse.get_missing_iuse(atom.use.required):
2966 # Don't add this to packages_with_invalid_use_config
2967 # since IUSE cannot be adjusted by the user.
2970 matched_pkgs_ignore_use.append(pkg)
2971 if allow_use_changes:
2973 for flag in atom.use.enabled:
2974 target_use[flag] = True
2975 for flag in atom.use.disabled:
2976 target_use[flag] = False
2977 use = self._pkg_use_enabled(pkg, target_use)
2979 use = self._pkg_use_enabled(pkg)
2981 if atom.use.enabled.difference(use) and \
2982 atom.use.enabled.difference(use).difference(atom.use.missing_enabled.difference(pkg.iuse.all)):
2984 packages_with_invalid_use_config.append(pkg)
2986 if atom.use.disabled.intersection(use) or \
2987 atom.use.disabled.difference(pkg.iuse.all).difference(atom.use.missing_disabled):
2989 packages_with_invalid_use_config.append(pkg)
2991 elif atom.unevaluated_atom.use:
2992 #Make sure we don't miss a 'missing IUSE'.
2993 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2994 # Don't add this to packages_with_invalid_use_config
2995 # since IUSE cannot be adjusted by the user.
2998 #check REQUIRED_USE constraints
2999 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
3000 eapi_has_required_use(pkg.metadata["EAPI"]):
3001 required_use = pkg.metadata["REQUIRED_USE"]
3002 use = self._pkg_use_enabled(pkg)
3004 required_use_is_sat = check_required_use(
3005 pkg.metadata["REQUIRED_USE"], use, pkg.iuse.is_valid_flag)
3006 except portage.exception.InvalidDependString as e:
3007 portage.writemsg("!!! Invalid REQUIRED_USE specified by " + \
3008 "'%s': %s\n" % (pkg.cpv, str(e)), noiselevel=-1)
3011 if not required_use_is_sat:
3014 if pkg.cp == atom_cp:
3015 if highest_version is None:
3016 highest_version = pkg
3017 elif pkg > highest_version:
3018 highest_version = pkg
3019 # At this point, we've found the highest visible
3020 # match from the current repo. Any lower versions
3021 # from this repo are ignored, so this so the loop
3022 # will always end with a break statement below
3024 if find_existing_node:
3025 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3028 # Use PackageSet.findAtomForPackage()
3029 # for PROVIDE support.
3030 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3031 if highest_version and \
3032 e_pkg.cp == atom_cp and \
3033 e_pkg < highest_version and \
3034 e_pkg.slot_atom != highest_version.slot_atom:
3035 # There is a higher version available in a
3036 # different slot, so this existing node is
3040 matched_packages.append(e_pkg)
3041 existing_node = e_pkg
3043 # Compare built package to current config and
3044 # reject the built package if necessary.
3045 if built and (not installed or matched_pkgs_ignore_use) and \
3046 ("--newuse" in self._frozen_config.myopts or \
3047 "--reinstall" in self._frozen_config.myopts or \
3048 "--binpkg-respect-use" in self._frozen_config.myopts):
3049 iuses = pkg.iuse.all
3050 old_use = self._pkg_use_enabled(pkg)
3052 pkgsettings.setcpv(myeb)
3054 pkgsettings.setcpv(pkg)
3055 now_use = pkgsettings["PORTAGE_USE"].split()
3056 forced_flags = set()
3057 forced_flags.update(pkgsettings.useforce)
3058 forced_flags.update(pkgsettings.usemask)
3060 if myeb and not usepkgonly:
3061 cur_iuse = myeb.iuse.all
3062 if self._reinstall_for_flags(forced_flags,
3066 # Compare current config to installed package
3067 # and do not reinstall if possible.
3068 if not installed and \
3069 ("--newuse" in self._frozen_config.myopts or \
3070 "--reinstall" in self._frozen_config.myopts) and \
3071 cpv in vardb.match(atom):
3072 pkgsettings.setcpv(pkg)
3073 forced_flags = set()
3074 forced_flags.update(pkgsettings.useforce)
3075 forced_flags.update(pkgsettings.usemask)
3076 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3077 old_iuse = set(filter_iuse_defaults(
3078 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3079 cur_use = self._pkg_use_enabled(pkg)
3080 cur_iuse = pkg.iuse.all
3081 reinstall_for_flags = \
3082 self._reinstall_for_flags(
3083 forced_flags, old_use, old_iuse,
3085 if reinstall_for_flags:
3089 matched_packages.append(pkg)
3090 if reinstall_for_flags:
3091 self._dynamic_config._reinstall_nodes[pkg] = \
3095 if not matched_packages:
3098 if "--debug" in self._frozen_config.myopts:
3099 for pkg in matched_packages:
3100 portage.writemsg("%s %s\n" % \
3101 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3103 # Filter out any old-style virtual matches if they are
3104 # mixed with new-style virtual matches.
3106 if len(matched_packages) > 1 and \
3107 "virtual" == portage.catsplit(cp)[0]:
3108 for pkg in matched_packages:
3111 # Got a new-style virtual, so filter
3112 # out any old-style virtuals.
3113 matched_packages = [pkg for pkg in matched_packages \
3117 if existing_node is not None and \
3118 existing_node in matched_packages:
3119 return existing_node, existing_node
3121 if len(matched_packages) > 1:
3122 if rebuilt_binaries:
3125 for pkg in matched_packages:
3130 if built_pkg is not None and inst_pkg is not None:
3131 # Only reinstall if binary package BUILD_TIME is
3132 # non-empty, in order to avoid cases like to
3133 # bug #306659 where BUILD_TIME fields are missing
3134 # in local and/or remote Packages file.
3136 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3137 except (KeyError, ValueError):
3141 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3142 except (KeyError, ValueError):
3143 installed_timestamp = 0
3145 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3146 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3147 if built_timestamp and \
3148 built_timestamp > installed_timestamp and \
3149 built_timestamp >= minimal_timestamp:
3150 return built_pkg, existing_node
3152 #Don't care if the binary has an older BUILD_TIME than the installed
3153 #package. This is for closely tracking a binhost.
3154 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3156 if built_timestamp and \
3157 built_timestamp != installed_timestamp:
3158 return built_pkg, existing_node
3160 for pkg in matched_packages:
3161 if pkg.installed and pkg.invalid:
3162 matched_packages = [x for x in \
3163 matched_packages if x is not pkg]
3166 for pkg in matched_packages:
3167 if pkg.installed and self._pkg_visibility_check(pkg, \
3168 allow_unstable_keywords=allow_unstable_keywords,
3169 allow_license_changes=allow_license_changes):
3170 return pkg, existing_node
3172 bestmatch = portage.best(
3173 [pkg.cpv for pkg in matched_packages \
3174 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3175 allow_license_changes=allow_license_changes)])
3177 # all are masked, so ignore visibility
3178 bestmatch = portage.best(
3179 [pkg.cpv for pkg in matched_packages])
3180 matched_packages = [pkg for pkg in matched_packages \
3181 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3183 # ordered by type preference ("ebuild" type is the last resort)
3184 return matched_packages[-1], existing_node
3186 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Returns a (pkg, in_graph) pair: the highest package matching `atom` from
# the already-built graph trees, plus any node already occupying that
# package's slot in the dependency graph (may be None).
# NOTE(review): this listing is elided (gaps in the embedded numbering);
# the handling for an empty `matches` result falls between the visible
# lines — confirm against the full source.
3188 Select packages that have already been added to the graph or
3189 those that are installed and have not been scheduled for
3192 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3193 matches = graph_db.match_pkgs(atom)
# matches is ordered lowest-to-highest; take the best (last) match.
3196 pkg = matches[-1] # highest match
3197 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3198 return pkg, in_graph
3200 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
# Returns a (pkg, in_graph) pair like _select_pkg_from_graph(), but
# matching only against the installed-package database (vartree).
# NOTE(review): elided listing — empty-match handling is between the
# visible lines; confirm against the full source.
3202 Select packages that are installed.
3204 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3205 matches = vardb.match_pkgs(atom)
# matches is ordered lowest-to-highest; take the best (last) match.
3208 pkg = matches[-1] # highest match
3209 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3210 return pkg, in_graph
3212 def _complete_graph(self, required_sets=None):
# NOTE(review): elided listing (gaps in the embedded numbering) — several
# early-return and loop-header lines fall between the visible lines.
3214 Add any deep dependencies of required sets (args, system, world) that
3215 have not been pulled into the graph yet. This ensures that the graph
3216 is consistent such that initially satisfied deep dependencies are not
3217 broken in the new graph. Initially unsatisfied dependencies are
3218 irrelevant since we only want to avoid breaking dependencies that are
3219 initially satisfied.
3221 Since this method can consume enough time to disturb users, it is
3222 currently only enabled by the --complete-graph option.
3224 @param required_sets: contains required sets (currently only used
3225 for depclean and prune removal operations)
3226 @type required_sets: dict
# Bail out early when completing the graph is pointless or disabled.
3228 if "--buildpkgonly" in self._frozen_config.myopts or \
3229 "recurse" not in self._dynamic_config.myparams:
3232 if "complete" not in self._dynamic_config.myparams:
3233 # Skip this to avoid consuming enough time to disturb users.
3238 # Put the depgraph into a mode that causes it to only
3239 # select packages that have already been added to the
3240 # graph or those that are installed and have not been
3241 # scheduled for replacement. Also, toggle the "deep"
3242 # parameter so that all dependencies are traversed and
3244 self._select_atoms = self._select_atoms_from_graph
3245 if "remove" in self._dynamic_config.myparams:
# Removal operations resolve against installed packages only.
3246 self._select_package = self._select_pkg_from_installed
3248 self._select_package = self._select_pkg_from_graph
3249 already_deep = self._dynamic_config.myparams.get("deep") is True
3250 if not already_deep:
3251 self._dynamic_config.myparams["deep"] = True
3253 # Invalidate the package selection cache, since
3254 # _select_package has just changed implementations.
3255 for trees in self._dynamic_config._filtered_trees.values():
3256 trees["porttree"].dbapi._clear_cache()
# Rebuild the argument list, appending the required sets (system/world
# or caller-supplied replacements) for each relevant root.
3258 args = self._dynamic_config._initial_arg_list[:]
3259 for root in self._frozen_config.roots:
3260 if root != self._frozen_config.target_root and \
3261 "remove" in self._dynamic_config.myparams:
3262 # Only pull in deps for the relevant root.
3264 depgraph_sets = self._dynamic_config.sets[root]
3265 required_set_names = self._frozen_config._required_set_names.copy()
3266 remaining_args = required_set_names.copy()
3267 if required_sets is None or root not in required_sets:
3270 # Removal actions may override sets with temporary
3271 # replacements that have had atoms removed in order
3272 # to implement --deselect behavior.
3273 required_set_names = set(required_sets[root])
3274 depgraph_sets.sets.clear()
3275 depgraph_sets.sets.update(required_sets[root])
3276 if "remove" not in self._dynamic_config.myparams and \
3277 root == self._frozen_config.target_root and \
3278 (already_deep or "empty" in self._dynamic_config.myparams):
3279 remaining_args.difference_update(depgraph_sets.sets)
3280 if not remaining_args and \
3281 not self._dynamic_config._ignored_deps and \
3282 not self._dynamic_config._dep_stack:
3284 root_config = self._frozen_config.roots[root]
3285 for s in required_set_names:
3286 pset = depgraph_sets.sets.get(s)
3288 pset = root_config.sets[s]
3289 atom = SETPREFIX + s
3290 args.append(SetArg(arg=atom, pset=pset,
3291 root_config=root_config))
# Push every atom from the (expanded) set arguments onto the dep
# stack, then re-run graph creation over them.
3293 self._set_args(args)
3294 for arg in self._expand_set_args(args, add_to_digraph=True):
3295 for atom in arg.pset.getAtoms():
3296 self._dynamic_config._dep_stack.append(
3297 Dependency(atom=atom, root=arg.root_config.root,
3301 if self._dynamic_config._ignored_deps:
3302 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
3303 self._dynamic_config._ignored_deps = []
3304 if not self._create_graph(allow_unsatisfied=True):
3306 # Check the unsatisfied deps to see if any initially satisfied deps
3307 # will become unsatisfied due to an upgrade. Initially unsatisfied
3308 # deps are irrelevant since we only want to avoid breaking deps
3309 # that are initially satisfied.
3310 while self._dynamic_config._unsatisfied_deps:
3311 dep = self._dynamic_config._unsatisfied_deps.pop()
3312 vardb = self._frozen_config.roots[
3313 dep.root].trees["vartree"].dbapi
3314 matches = vardb.match_pkgs(dep.atom)
3316 self._dynamic_config._initially_unsatisfied_deps.append(dep)
3318 # A scheduled installation broke a deep dependency.
3319 # Add the installed package to the graph so that it
3320 # will be appropriately reported as a slot collision
3321 # (possibly solvable via backtracking).
3322 pkg = matches[-1] # highest match
3323 if not self._add_pkg(pkg, dep):
3325 if not self._create_graph(allow_unsatisfied=True):
3329 def _pkg(self, cpv, type_name, root_config, installed=False,
3330 onlydeps=False, myrepo = None):
# NOTE(review): elided listing — the try/except around aux_get and the
# cache-hit early return fall between the visible lines.
3332 Get a package instance from the cache, or create a new
3333 one if necessary. Raises PackageNotFound from aux_get if it
3334 fails for some reason (package does not exist or is
# Cache keys include the repo only for "ebuild" packages; for installed
# and binary packages a cpv is unique, so the type name stands in.
3337 if type_name != "ebuild":
3338 # For installed (and binary) packages we don't care for the repo
3339 # when it comes to hashing, because there can only be one cpv.
3340 # So overwrite the repo_key with type_name.
3341 repo_key = type_name
3343 elif myrepo is None:
3344 raise AssertionError(
3345 "depgraph._pkg() called without 'myrepo' argument")
3350 if installed or onlydeps:
3351 operation = "nomerge"
3352 # Ensure that we use the specially optimized RootConfig instance
3353 # that refers to FakeVartree instead of the real vartree.
3354 root_config = self._frozen_config.roots[root_config.root]
3355 pkg = self._frozen_config._pkg_cache.get(
3356 (type_name, root_config.root, cpv, operation, repo_key))
3357 if pkg is None and onlydeps and not installed:
3358 # Maybe it already got pulled in as a "merge" node.
3359 pkg = self._dynamic_config.mydbapi[root_config.root].get(
3360 (type_name, root_config.root, cpv, 'merge', repo_key))
# Cache miss: fetch metadata from the appropriate tree's dbapi and
# construct a fresh Package instance.
3363 tree_type = self.pkg_tree_map[type_name]
3364 db = root_config.trees[tree_type].dbapi
3365 db_keys = list(self._frozen_config._trees_orig[root_config.root][
3366 tree_type].dbapi._aux_cache_keys)
3369 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
3371 raise portage.exception.PackageNotFound(cpv)
3373 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
3374 installed=installed, metadata=metadata, onlydeps=onlydeps,
3375 root_config=root_config, type_name=type_name)
3377 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked solely by LICENSE,
# so the user can later be told to adjust ACCEPT_LICENSE.
3379 if not self._pkg_visibility_check(pkg) and \
3380 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
3381 slot_key = (pkg.root, pkg.slot_atom)
3382 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
3383 if other_pkg is None or pkg > other_pkg:
3384 self._frozen_config._highest_license_masked[slot_key] = pkg
3388 def _validate_blockers(self):
3389 """Remove any blockers from the digraph that do not match any of the
3390 packages within the graph. If necessary, create hard deps to ensure
3391 correct merge order such that mutually blocking packages are never
3392 installed simultaneously."""
# NOTE(review): elided listing (gaps in the embedded numbering) — loop
# headers, several early returns/continues, and try/except scaffolding
# fall between the visible lines throughout this method.
3394 if "--buildpkgonly" in self._frozen_config.myopts or \
3395 "--nodeps" in self._frozen_config.myopts:
3398 complete = "complete" in self._dynamic_config.myparams
3399 deep = "deep" in self._dynamic_config.myparams
# --- Phase 1: collect blocker atoms from installed packages, using a
# --- persistent BlockerCache keyed by COUNTER to avoid dep_check calls.
3402 # Pull in blockers from all installed packages that haven't already
3403 # been pulled into the depgraph. This is not enabled by default
3404 # due to the performance penalty that is incurred by all the
3405 # additional dep_check calls that are required.
3407 # For installed packages, always ignore blockers from DEPEND since
3408 # only runtime dependencies should be relevant for packages that
3409 # are already built.
3410 dep_keys = ["RDEPEND", "PDEPEND"]
3411 for myroot in self._frozen_config.trees:
3412 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
3413 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
3414 pkgsettings = self._frozen_config.pkgsettings[myroot]
3415 root_config = self._frozen_config.roots[myroot]
3416 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
3417 final_db = self._dynamic_config.mydbapi[myroot]
3419 blocker_cache = BlockerCache(myroot, vardb)
# Entries still in stale_cache at the end belong to packages that no
# longer exist and are purged from the cache below.
3420 stale_cache = set(blocker_cache)
3423 stale_cache.discard(cpv)
3424 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
3426 pkg in self._dynamic_config._traversed_pkg_deps
3428 # Check for masked installed packages. Only warn about
3429 # packages that are in the graph in order to avoid warning
3430 # about those that will be automatically uninstalled during
3431 # the merge process or by --depclean. Always warn about
3432 # packages masked by license, since the user likely wants
3433 # to adjust ACCEPT_LICENSE.
3435 if not self._pkg_visibility_check(pkg) and \
3436 (pkg_in_graph or 'LICENSE' in pkg.masks):
3437 self._dynamic_config._masked_installed.add(pkg)
3439 self._check_masks(pkg)
3441 blocker_atoms = None
3447 self._dynamic_config._blocker_parents.child_nodes(pkg))
3452 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
3456 # Select just the runtime blockers.
3457 blockers = [blocker for blocker in blockers \
3458 if blocker.priority.runtime or \
3459 blocker.priority.runtime_post]
3460 if blockers is not None:
3461 blockers = set(blocker.atom for blocker in blockers)
3463 # If this node has any blockers, create a "nomerge"
3464 # node for it so that they can be enforced.
3465 self._spinner_update()
3466 blocker_data = blocker_cache.get(cpv)
# A changed COUNTER means the installed package was re-merged, so any
# cached blocker data for it is invalid.
3467 if blocker_data is not None and \
3468 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3471 # If blocker data from the graph is available, use
3472 # it to validate the cache and update the cache if
3474 if blocker_data is not None and \
3475 blockers is not None:
3476 if not blockers.symmetric_difference(
3477 blocker_data.atoms):
3481 if blocker_data is None and \
3482 blockers is not None:
3483 # Re-use the blockers from the graph.
3484 blocker_atoms = sorted(blockers)
3485 counter = long(pkg.metadata["COUNTER"])
3487 blocker_cache.BlockerData(counter, blocker_atoms)
3488 blocker_cache[pkg.cpv] = blocker_data
3492 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
# Cache miss: run a real dep_check over RDEPEND+PDEPEND to extract
# blocker atoms, then store the result keyed by COUNTER.
3494 # Use aux_get() to trigger FakeVartree global
3495 # updates on *DEPEND when appropriate.
3496 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3497 # It is crucial to pass in final_db here in order to
3498 # optimize dep_check calls by eliminating atoms via
3499 # dep_wordreduce and dep_eval calls.
3501 success, atoms = portage.dep_check(depstr,
3502 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
3503 trees=self._dynamic_config._graph_trees, myroot=myroot)
3506 except Exception as e:
3507 # This is helpful, for example, if a ValueError
3508 # is thrown from cpv_expand due to multiple
3509 # matches (this can happen if an atom lacks a
3511 show_invalid_depstring_notice(
3512 pkg, depstr, str(e))
3516 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3517 if replacement_pkg and \
3518 replacement_pkg[0].operation == "merge":
3519 # This package is being replaced anyway, so
3520 # ignore invalid dependencies so as not to
3521 # annoy the user too much (otherwise they'd be
3522 # forced to manually unmerge it first).
3524 show_invalid_depstring_notice(pkg, depstr, atoms)
3526 blocker_atoms = [myatom for myatom in atoms \
3528 blocker_atoms.sort()
3529 counter = long(pkg.metadata["COUNTER"])
3530 blocker_cache[cpv] = \
3531 blocker_cache.BlockerData(counter, blocker_atoms)
# Register each blocker atom as a child of this installed package in
# the blocker-parents digraph.
3534 for atom in blocker_atoms:
3535 blocker = Blocker(atom=atom,
3536 eapi=pkg.metadata["EAPI"],
3537 priority=self._priority(runtime=True),
3539 self._dynamic_config._blocker_parents.add(blocker, pkg)
3540 except portage.exception.InvalidAtom as e:
3541 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3542 show_invalid_depstring_notice(
3543 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages that no longer exist, then persist.
3545 for cpv in stale_cache:
3546 del blocker_cache[cpv]
3547 blocker_cache.flush()
# --- Phase 2: resolve each blocker against the initial (installed) and
# --- final (post-merge) package databases.
3550 # Discard any "uninstall" tasks scheduled by previous calls
3551 # to this method, since those tasks may not make sense given
3552 # the current graph state.
3553 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
3554 if previous_uninstall_tasks:
3555 self._dynamic_config._blocker_uninstalls = digraph()
3556 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
3558 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
3559 self._spinner_update()
3560 root_config = self._frozen_config.roots[blocker.root]
3561 virtuals = root_config.settings.getvirtuals()
3562 myroot = blocker.root
3563 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
3564 final_db = self._dynamic_config.mydbapi[myroot]
# Old-style virtuals must be expanded to the atoms of their providers.
3566 provider_virtual = False
3567 if blocker.cp in virtuals and \
3568 not self._have_new_virt(blocker.root, blocker.cp):
3569 provider_virtual = True
3571 # Use this to check PROVIDE for each matched package
3573 atom_set = InternalPackageSet(
3574 initial_atoms=[blocker.atom])
3576 if provider_virtual:
3578 for provider_entry in virtuals[blocker.cp]:
3579 atoms.append(Atom(blocker.atom.replace(
3580 blocker.cp, provider_entry.cp, 1)))
3582 atoms = [blocker.atom]
# Packages blocked in the current installed state...
3584 blocked_initial = set()
3586 for pkg in initial_db.match_pkgs(atom):
3587 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3588 blocked_initial.add(pkg)
# ...and packages that would be blocked after the planned merges.
3590 blocked_final = set()
3592 for pkg in final_db.match_pkgs(atom):
3593 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3594 blocked_final.add(pkg)
3596 if not blocked_initial and not blocked_final:
3597 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
3598 self._dynamic_config._blocker_parents.remove(blocker)
3599 # Discard any parents that don't have any more blockers.
3600 for pkg in parent_pkgs:
3601 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3602 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3603 self._dynamic_config._blocker_parents.remove(pkg)
3605 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3606 unresolved_blocks = False
3607 depends_on_order = set()
3608 for pkg in blocked_initial:
3609 if pkg.slot_atom == parent.slot_atom and \
3610 not blocker.atom.blocker.overlap.forbid:
3611 # New !!atom blockers do not allow temporary
3612 # simultaneous installation, so unlike !atom
3613 # blockers, !!atom blockers aren't ignored
3614 # when they match other packages occupying
3617 if parent.installed:
3618 # Two currently installed packages conflict with
3619 # each other. Ignore this case since the damage
3620 # is already done and this would be likely to
3621 # confuse users if displayed like a normal blocker.
3624 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3626 if parent.operation == "merge":
3627 # Maybe the blocked package can be replaced or simply
3628 # unmerged to resolve this block.
3629 depends_on_order.add((pkg, parent))
3631 # None of the above blocker resolution techniques apply,
3632 # so apparently this one is unresolvable.
3633 unresolved_blocks = True
3634 for pkg in blocked_final:
3635 if pkg.slot_atom == parent.slot_atom and \
3636 not blocker.atom.blocker.overlap.forbid:
3637 # New !!atom blockers do not allow temporary
3638 # simultaneous installation, so unlike !atom
3639 # blockers, !!atom blockers aren't ignored
3640 # when they match other packages occupying
3643 if parent.operation == "nomerge" and \
3644 pkg.operation == "nomerge":
3645 # This blocker will be handled the next time that a
3646 # merge of either package is triggered.
3649 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3651 # Maybe the blocking package can be
3652 # unmerged to resolve this block.
3653 if parent.operation == "merge" and pkg.installed:
3654 depends_on_order.add((pkg, parent))
3656 elif parent.operation == "nomerge":
3657 depends_on_order.add((parent, pkg))
3659 # None of the above blocker resolution techniques apply,
3660 # so apparently this one is unresolvable.
3661 unresolved_blocks = True
3663 # Make sure we don't unmerge any packages that have been pulled
3665 if not unresolved_blocks and depends_on_order:
3666 for inst_pkg, inst_task in depends_on_order:
3667 if self._dynamic_config.digraph.contains(inst_pkg) and \
3668 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3669 unresolved_blocks = True
3672 if not unresolved_blocks and depends_on_order:
3673 for inst_pkg, inst_task in depends_on_order:
# Build an "uninstall" task mirroring the installed package.
3674 uninst_task = Package(built=inst_pkg.built,
3675 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3676 metadata=inst_pkg.metadata,
3677 operation="uninstall",
3678 root_config=inst_pkg.root_config,
3679 type_name=inst_pkg.type_name)
3680 # Enforce correct merge order with a hard dep.
3681 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3682 priority=BlockerDepPriority.instance)
3683 # Count references to this blocker so that it can be
3684 # invalidated after nodes referencing it have been
3686 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3687 if not unresolved_blocks and not depends_on_order:
3688 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3689 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3690 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3691 self._dynamic_config._blocker_parents.remove(blocker)
3692 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3693 self._dynamic_config._blocker_parents.remove(parent)
3694 if unresolved_blocks:
3695 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
3699 def _accept_blocker_conflicts(self):
# True when the current options make blocker conflicts tolerable
# (no packages will actually be merged onto the live system).
# NOTE(review): elided listing — the accumulator/return lines fall
# between and after the visible lines; confirm against the full source.
3701 for x in ("--buildpkgonly", "--fetchonly",
3702 "--fetch-all-uri", "--nodeps"):
3703 if x in self._frozen_config.myopts:
3708 def _merge_order_bias(self, mygraph):
3710 For optimal leaf node selection, promote deep system runtime deps and
3711 order nodes from highest to lowest overall reference count.
# Pre-compute each node's parent (reference) count once, then sort with
# a comparison function wrapped by cmp_sort_key for Python 3 compat.
3715 for node in mygraph.order:
3716 node_info[node] = len(mygraph.parent_nodes(node))
3717 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
3719 def cmp_merge_preference(node1, node2):
# NOTE(review): elided listing — the return values for the uninstall
# and deep-system branches fall between the visible lines.
3721 if node1.operation == 'uninstall':
3722 if node2.operation == 'uninstall':
3726 if node2.operation == 'uninstall':
3727 if node1.operation == 'uninstall':
3731 node1_sys = node1 in deep_system_deps
3732 node2_sys = node2 in deep_system_deps
3733 if node1_sys != node2_sys:
# Fallback: descending reference count (more-referenced nodes first).
3738 return node_info[node2] - node_info[node1]
3740 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
3742 def altlist(self, reversed=False):
# Returns the serialized merge list, computing and caching it on first
# use; retries serialization until it succeeds or conflicts resolve.
# NOTE(review): the `reversed` parameter shadows the builtin of the same
# name; it cannot be renamed here without breaking keyword callers.
3744 while self._dynamic_config._serialized_tasks_cache is None:
3745 self._resolve_conflicts()
3747 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
3748 self._serialize_tasks()
# _serialize_tasks_retry signals that serialization must be re-run.
3749 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list.
3752 retlist = self._dynamic_config._serialized_tasks_cache[:]
3757 def schedulerGraph(self):
3759 The scheduler graph is identical to the normal one except that
3760 uninstall edges are reversed in specific cases that require
3761 conflicting packages to be temporarily installed simultaneously.
3762 This is intended for use by the Scheduler in its parallelization
3763 logic. It ensures that temporary simultaneous installation of
3764 conflicting packages is avoided when appropriate (especially for
3765 !!atom blockers), but allowed in specific cases that require it.
3767 Note that this method calls break_refs() which alters the state of
3768 internal Package instances such that this depgraph instance should
3769 not be used to perform any more calculations.
3772 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
3773 mergelist = self.altlist()
3774 self.break_refs(mergelist)
3775 self.break_refs(self._dynamic_config._scheduler_graph.order)
3777 # Break DepPriority.satisfied attributes which reference
3778 # installed Package instances.
3779 for parents, children, node in \
3780 self._dynamic_config._scheduler_graph.nodes.values():
3781 for priorities in chain(parents.values(), children.values()):
3782 for priority in priorities:
3783 if priority.satisfied:
# Replace the Package reference with a plain True sentinel so the
# priority no longer keeps the Package (and depgraph) alive.
3784 priority.satisfied = True
# Prune the package cache down to packages the Scheduler can still
# reach: those in the graph, or installed and present in the vartree.
3786 pkg_cache = self._frozen_config._pkg_cache
3787 graph = self._dynamic_config._scheduler_graph
3788 trees = self._frozen_config.trees
3789 pruned_pkg_cache = {}
3790 for key, pkg in pkg_cache.items():
3791 if pkg in graph or \
3792 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
3793 pruned_pkg_cache[key] = pkg
3796 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
3797 self.break_refs(trees[root]['vartree'].dbapi)
3799 self.break_refs(pruned_pkg_cache.values())
3801 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
3805 def break_refs(self, nodes):
# NOTE(review): elided listing — the loop header over `nodes` falls
# between the visible lines.
3807 Take a mergelist like that returned from self.altlist() and
3808 break any references that lead back to the depgraph. This is
3809 useful if you want to hold references to packages without
3810 also holding the depgraph on the heap.
# Only objects carrying a root_config need rewriting; other nodes
# (e.g. blockers without one) are left untouched.
3813 if hasattr(node, "root_config"):
3814 # The FakeVartree references the _package_cache which
3815 # references the depgraph. So that Package instances don't
3816 # hold the depgraph and FakeVartree on the heap, replace
3817 # the RootConfig that references the FakeVartree with the
3818 # original RootConfig instance which references the actual
3820 node.root_config = \
3821 self._frozen_config._trees_orig[node.root_config.root]["root_config"]
3823 def _resolve_conflicts(self):
3824 if not self._complete_graph():
3825 raise self._unknown_internal_error()
3827 if not self._validate_blockers():
3828 raise self._unknown_internal_error()
3830 if self._dynamic_config._slot_collision_info:
3831 self._process_slot_conflicts()
def _serialize_tasks(self):
    """
    Linearize the dependency digraph into a merge-ordered task list.

    Returns a (retlist, scheduler_graph) pair: retlist is the ordered
    list of Package/Blocker tasks and scheduler_graph is a copy of the
    digraph (with some uninstall edges reversed) for the Scheduler.

    NOTE(review): the original line numbers in this listing jump, so
    many statements (loop/branch headers, ``break``/``continue``/
    ``return`` lines, and several initializations such as
    ``asap_nodes``/``retlist``/``prefer_asap``/``uninst_task``) are
    elided. Elision points are marked below; confirm the exact
    control flow against upstream before editing.
    """

    if "--debug" in self._frozen_config.myopts:
        writemsg("\ndigraph:\n\n", noiselevel=-1)
        self._dynamic_config.digraph.debug_print()
        writemsg("\n", noiselevel=-1)

    scheduler_graph = self._dynamic_config.digraph.copy()

    if '--nodeps' in self._frozen_config.myopts:
        # Preserve the package order given on the command line.
        return ([node for node in scheduler_graph \
            if isinstance(node, Package) \
            and node.operation == 'merge'], scheduler_graph)

    mygraph=self._dynamic_config.digraph.copy()

    removed_nodes = set()

    # Prune off all DependencyArg instances since they aren't
    # needed, and because of nested sets this is faster than doing
    # it with multiple digraph.root_nodes() calls below. This also
    # takes care of nested sets that have circular references,
    # which wouldn't be matched by digraph.root_nodes().
    for node in mygraph:
        if isinstance(node, DependencyArg):
            removed_nodes.add(node)

    mygraph.difference_update(removed_nodes)
    removed_nodes.clear()

    # Prune "nomerge" root nodes if nothing depends on them, since
    # otherwise they slow down merge order calculation. Don't remove
    # non-root nodes since they help optimize merge order in some cases
    # such as revdep-rebuild.
    # NOTE(review): an enclosing repeat-until-stable loop header is
    # elided in this listing.
    for node in mygraph.root_nodes():
        if not isinstance(node, Package) or \
            node.installed or node.onlydeps:
            removed_nodes.add(node)
    self._spinner_update()
    mygraph.difference_update(removed_nodes)
    if not removed_nodes:
        # NOTE(review): loop-exit statement elided in this listing.
    removed_nodes.clear()

    self._merge_order_bias(mygraph)

    def cmp_circular_bias(n1, n2):
        """
        RDEPEND is stronger than PDEPEND and this function
        measures such a strength bias within a circular
        dependency relationship.
        """
        n1_n2_medium = n2 in mygraph.child_nodes(n1,
            ignore_priority=priority_range.ignore_medium_soft)
        n2_n1_medium = n1 in mygraph.child_nodes(n2,
            ignore_priority=priority_range.ignore_medium_soft)
        if n1_n2_medium == n2_n1_medium:
            # NOTE(review): the return statements of this comparison
            # function are elided in this listing.

    myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()

    # Contains uninstall tasks that have been scheduled to
    # occur after overlapping blockers have been installed.
    scheduled_uninstalls = set()
    # Contains any Uninstall tasks that have been ignored
    # in order to avoid the circular deps code path. These
    # correspond to blocker conflicts that could not be
    # resolved.
    ignored_uninstall_tasks = set()
    have_uninstall_task = False
    complete = "complete" in self._dynamic_config.myparams
    # NOTE(review): initializations of ``asap_nodes`` and ``retlist``
    # (used below) are elided in this listing.

    def get_nodes(**kwargs):
        """
        Returns leaf nodes excluding Uninstall instances
        since those should be executed as late as possible.
        """
        return [node for node in mygraph.leaf_nodes(**kwargs) \
            if isinstance(node, Package) and \
                (node.operation != "uninstall" or \
                node in scheduled_uninstalls)]

    # sys-apps/portage needs special treatment if ROOT="/"
    running_root = self._frozen_config._running_root.root
    runtime_deps = InternalPackageSet(
        initial_atoms=[PORTAGE_PACKAGE_ATOM])
    running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
        PORTAGE_PACKAGE_ATOM)
    replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
        PORTAGE_PACKAGE_ATOM)

    # NOTE(review): the if/else headers around these two assignments
    # (reducing the match list to one Package or None) are elided.
    running_portage = running_portage[0]
    running_portage = None

    if replacement_portage:
        replacement_portage = replacement_portage[0]
        # NOTE(review): else-branch header elided in this listing.
        replacement_portage = None

    if replacement_portage == running_portage:
        replacement_portage = None

    if replacement_portage is not None and \
        (running_portage is None or \
        running_portage.cpv != replacement_portage.cpv or \
        '9999' in replacement_portage.cpv or \
        'git' in replacement_portage.inherited):
        # update from running_portage to replacement_portage asap
        asap_nodes.append(replacement_portage)

    if running_portage is not None:
        # NOTE(review): try-header elided in this listing.
        portage_rdepend = self._select_atoms_highest_available(
            running_root, running_portage.metadata["RDEPEND"],
            myuse=self._pkg_use_enabled(running_portage),
            parent=running_portage, strict=False)
        except portage.exception.InvalidDependString as e:
            portage.writemsg("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (running_root, running_portage.cpv, e), noiselevel=-1)
            portage_rdepend = {running_portage : []}
        for atoms in portage_rdepend.values():
            runtime_deps.update(atom for atom in atoms \
                if not atom.blocker)

    # Merge libc asap, in order to account for implicit
    # dependencies. See bug #303567.
    for root in (running_root,):
        libc_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
            portage.const.LIBC_PACKAGE_ATOM)
        # NOTE(review): an empty-match guard is elided in this listing.
        libc_pkg = libc_pkg[0]
        if libc_pkg.operation == 'merge':
            # Only add a dep when the version changes.
            if not libc_pkg.root_config.trees[
                'vartree'].dbapi.cpv_exists(libc_pkg.cpv):

                # If there's also an os-headers upgrade, we need to
                # pull that in first. See bug #328317.
                os_headers_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
                    portage.const.OS_HEADERS_PACKAGE_ATOM)
                # NOTE(review): empty-match guard elided in this listing.
                os_headers_pkg = os_headers_pkg[0]
                if os_headers_pkg.operation == 'merge':
                    # Only add a dep when the version changes.
                    if not os_headers_pkg.root_config.trees[
                        'vartree'].dbapi.cpv_exists(os_headers_pkg.cpv):
                        asap_nodes.append(os_headers_pkg)

                asap_nodes.append(libc_pkg)

    def gather_deps(ignore_priority, mergeable_nodes,
        selected_nodes, node):
        """
        Recursively gather a group of nodes that RDEPEND on
        each other. This ensures that they are merged as a group
        and get their RDEPENDs satisfied as soon as possible.

        NOTE(review): the return statements of this helper are
        elided in this listing.
        """
        if node in selected_nodes:
            # NOTE(review): return elided.
        if node not in mergeable_nodes:
            # NOTE(review): return elided.
        if node == replacement_portage and \
            mygraph.child_nodes(node,
            ignore_priority=priority_range.ignore_medium_soft):
            # Make sure that portage always has all of its
            # RDEPENDs installed first.
            # NOTE(review): return elided.
        selected_nodes.add(node)
        for child in mygraph.child_nodes(node,
            ignore_priority=ignore_priority):
            if not gather_deps(ignore_priority,
                mergeable_nodes, selected_nodes, child):
                # NOTE(review): return elided.

    def ignore_uninst_or_med(priority):
        # Blocker edges are always ignorable for this purpose.
        if priority is BlockerDepPriority.instance:
            # NOTE(review): return elided.
        return priority_range.ignore_medium(priority)

    def ignore_uninst_or_med_soft(priority):
        # Blocker edges are always ignorable for this purpose.
        if priority is BlockerDepPriority.instance:
            # NOTE(review): return elided.
        return priority_range.ignore_medium_soft(priority)

    tree_mode = "--tree" in self._frozen_config.myopts
    # Tracks whether or not the current iteration should prefer asap_nodes
    # if available. This is set to False when the previous iteration
    # failed to select any nodes. It is reset whenever nodes are
    # successfully selected.
    # NOTE(review): initialization of ``prefer_asap`` elided here.

    # Controls whether or not the current iteration should drop edges that
    # are "satisfied" by installed packages, in order to solve circular
    # dependencies. The deep runtime dependencies of installed packages are
    # not checked in this case (bug #199856), so it must be avoided
    # whenever possible.
    drop_satisfied = False

    # State of variables for successive iterations that loosen the
    # criteria for node selection.
    #
    # iteration   prefer_asap   drop_satisfied
    # (table rows elided in this listing)
    #
    # If no nodes are selected on the last iteration, it is due to
    # unresolved blockers or circular dependencies.

    while not mygraph.empty():
        self._spinner_update()
        selected_nodes = None
        ignore_priority = None
        if drop_satisfied or (prefer_asap and asap_nodes):
            priority_range = DepPrioritySatisfiedRange
            # NOTE(review): else-branch header elided in this listing.
            priority_range = DepPriorityNormalRange
        if prefer_asap and asap_nodes:
            # ASAP nodes are merged before their soft deps. Go ahead and
            # select root nodes here if necessary, since it's typical for
            # the parent to have been removed from the graph already.
            asap_nodes = [node for node in asap_nodes \
                if mygraph.contains(node)]
            for node in asap_nodes:
                if not mygraph.child_nodes(node,
                    ignore_priority=priority_range.ignore_soft):
                    selected_nodes = [node]
                    asap_nodes.remove(node)
                    # NOTE(review): loop-exit elided in this listing.

        if not selected_nodes and \
            not (prefer_asap and asap_nodes):
            for i in range(priority_range.NONE,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                nodes = get_nodes(ignore_priority=ignore_priority)
                # NOTE(review): guard on ``nodes`` elided in this listing.
                # If there is a mixture of merges and uninstalls,
                # do the uninstalls first.
                # NOTE(review): len(nodes) guard and inner loop header
                # elided in this listing.
                good_uninstalls = []
                if node.operation == "uninstall":
                    good_uninstalls.append(node)
                # NOTE(review): branch headers elided in this listing.
                nodes = good_uninstalls

                if ignore_priority is None and not tree_mode:
                    # Greedily pop all of these nodes since no
                    # relationship has been ignored. This optimization
                    # destroys --tree output, so it's disabled in tree
                    # mode.
                    selected_nodes = nodes
                # NOTE(review): else-branch header elided in this listing.
                # For optimal merge order:
                # * Only pop one node.
                # * Removing a root node (node without a parent)
                #   will not produce a leaf node, so avoid it.
                # * It's normal for a selected uninstall to be a
                #   root node, so don't check them for parents.
                # NOTE(review): loop header over nodes elided here.
                if node.operation == "uninstall" or \
                    mygraph.parent_nodes(node):
                    selected_nodes = [node]
                # NOTE(review): break statements elided in this listing.

        if not selected_nodes:
            nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
            # NOTE(review): guard on ``nodes`` elided in this listing.
            mergeable_nodes = set(nodes)
            if prefer_asap and asap_nodes:
                # NOTE(review): reassignment of ``nodes`` elided here.
            for i in range(priority_range.SOFT,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                # NOTE(review): loop header over nodes elided here.
                if not mygraph.parent_nodes(node):
                    # NOTE(review): continue elided.
                selected_nodes = set()
                if gather_deps(ignore_priority,
                    mergeable_nodes, selected_nodes, node):
                    # NOTE(review): break elided; else-branch header elided.
                    selected_nodes = None
                # NOTE(review): outer break on success elided.

        if prefer_asap and asap_nodes and not selected_nodes:
            # We failed to find any asap nodes to merge, so ignore
            # them for the next iteration.
            # NOTE(review): ``prefer_asap = False`` / continue elided.

        if selected_nodes and ignore_priority is not None:
            # Try to merge ignored medium_soft deps as soon as possible
            # if they're not satisfied by installed packages.
            for node in selected_nodes:
                children = set(mygraph.child_nodes(node))
                soft = children.difference(
                    mygraph.child_nodes(node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
                medium_soft = children.difference(
                    mygraph.child_nodes(node,
                        # NOTE(review): keyword name elided in listing
                        DepPrioritySatisfiedRange.ignore_medium_soft))
                medium_soft.difference_update(soft)
                for child in medium_soft:
                    if child in selected_nodes:
                        # NOTE(review): continue elided.
                    if child in asap_nodes:
                        # NOTE(review): continue elided.
                    asap_nodes.append(child)

        if selected_nodes and len(selected_nodes) > 1:
            if not isinstance(selected_nodes, list):
                selected_nodes = list(selected_nodes)
            selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # An Uninstall task needs to be executed in order to
            # avoid conflict if possible.

            # NOTE(review): the if/else headers around these two
            # priority_range assignments are elided in this listing.
            priority_range = DepPrioritySatisfiedRange
            priority_range = DepPriorityNormalRange

            mergeable_nodes = get_nodes(
                ignore_priority=ignore_uninst_or_med)

            min_parent_deps = None
            # NOTE(review): ``uninst_task`` initialization elided here.

            # FIXME: This loop can be extremely slow when
            # there of lots of blockers to solve
            # (especially the gather_deps part).
            for task in myblocker_uninstalls.leaf_nodes():
                # Do some sanity checks so that system or world packages
                # don't get uninstalled inappropriately here (only really
                # necessary when --complete-graph has not been enabled).

                if task in ignored_uninstall_tasks:
                    # NOTE(review): continue elided.

                if task in scheduled_uninstalls:
                    # It's been scheduled but it hasn't
                    # been executed yet due to dependence
                    # on installation of blocking packages.
                    # NOTE(review): continue elided.

                root_config = self._frozen_config.roots[task.root]
                inst_pkg = self._pkg(task.cpv, "installed", root_config,
                    # NOTE(review): remaining argument(s) elided in listing

                if self._dynamic_config.digraph.contains(inst_pkg):
                    # NOTE(review): continue elided.

                forbid_overlap = False
                heuristic_overlap = False
                for blocker in myblocker_uninstalls.parent_nodes(task):
                    if not eapi_has_strong_blocks(blocker.eapi):
                        heuristic_overlap = True
                    elif blocker.atom.blocker.overlap.forbid:
                        forbid_overlap = True
                        # NOTE(review): break elided.
                if forbid_overlap and running_root == task.root:
                    # NOTE(review): continue elided.

                if heuristic_overlap and running_root == task.root:
                    # Never uninstall sys-apps/portage or its essential
                    # dependencies, except through replacement.
                    # NOTE(review): try-header elided in this listing.
                    runtime_dep_atoms = \
                        list(runtime_deps.iterAtomsForPackage(task))
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)
                        # NOTE(review): handler tail elided in this listing.

                    # Don't uninstall a runtime dep if it appears
                    # to be the only suitable one installed.
                    # NOTE(review): ``skip`` initialization elided here.
                    vardb = root_config.trees["vartree"].dbapi
                    for atom in runtime_dep_atoms:
                        other_version = None
                        for pkg in vardb.match_pkgs(atom):
                            if pkg.cpv == task.cpv and \
                                pkg.metadata["COUNTER"] == \
                                task.metadata["COUNTER"]:
                                # NOTE(review): continue elided.
                            # NOTE(review): other_version assignment and
                            # break elided in this listing.
                        if other_version is None:
                            # NOTE(review): skip/break elided.

                    # For packages in the system set, don't take
                    # any chances. If the conflict can't be resolved
                    # by a normal replacement operation then abort.
                    # NOTE(review): skip reset / try-header elided here.
                    for atom in root_config.sets[
                        "system"].iterAtomsForPackage(task):
                        # NOTE(review): skip/break elided.
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)
                        # NOTE(review): handler tail elided in this listing.

                # Note that the world check isn't always
                # necessary since self._complete_graph() will
                # add all packages from the system and world sets to the
                # graph. This just allows unresolved conflicts to be
                # detected as early as possible, which makes it possible
                # to avoid calling self._complete_graph() when it is
                # unnecessary due to blockers triggering an abortion.
                # NOTE(review): guard on ``complete`` elided here.
                # For packages in the world set, go ahead an uninstall
                # when necessary, as long as the atom will be satisfied
                # in the final state.
                graph_db = self._dynamic_config.mydbapi[task.root]
                # NOTE(review): skip reset / try-header elided here.
                for atom in root_config.sets[
                    "selected"].iterAtomsForPackage(task):
                    # NOTE(review): per-atom satisfied tracking elided.
                    for pkg in graph_db.match_pkgs(atom):
                        # NOTE(review): loop body elided in this listing.
                    # NOTE(review): unsatisfied-atom guard elided here.
                    self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
                    # NOTE(review): skip/break elided.
                except portage.exception.InvalidDependString as e:
                    portage.writemsg("!!! Invalid PROVIDE in " + \
                        "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                        (task.root, task.cpv, e), noiselevel=-1)
                    # NOTE(review): handler tail elided in this listing.

                # Check the deps of parent nodes to ensure that
                # the chosen task produces a leaf node. Maybe
                # this can be optimized some more to make the
                # best possible choice, but the current algorithm
                # is simple and should be near optimal for most
                # common cases.
                self._spinner_update()
                mergeable_parent = False
                # NOTE(review): ``parent_deps`` initialization elided here.
                for parent in mygraph.parent_nodes(task):
                    parent_deps.update(mygraph.child_nodes(parent,
                        ignore_priority=priority_range.ignore_medium_soft))
                    if parent in mergeable_nodes and \
                        gather_deps(ignore_uninst_or_med_soft,
                        mergeable_nodes, set(), parent):
                        mergeable_parent = True

                if not mergeable_parent:
                    # NOTE(review): continue elided.

                parent_deps.remove(task)
                if min_parent_deps is None or \
                    len(parent_deps) < min_parent_deps:
                    min_parent_deps = len(parent_deps)
                    # NOTE(review): ``uninst_task`` assignment elided.

            if uninst_task is not None:
                # The uninstall is performed only after blocking
                # packages have been merged on top of it. File
                # collisions between blocking packages are detected
                # and removed from the list of files to be uninstalled.
                scheduled_uninstalls.add(uninst_task)
                parent_nodes = mygraph.parent_nodes(uninst_task)

                # Reverse the parent -> uninstall edges since we want
                # to do the uninstall after blocking packages have
                # been merged on top of it.
                mygraph.remove(uninst_task)
                for blocked_pkg in parent_nodes:
                    mygraph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)
                    scheduler_graph.remove_edge(uninst_task, blocked_pkg)
                    scheduler_graph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)

                # Sometimes a merge node will render an uninstall
                # node unnecessary (due to occupying the same SLOT),
                # and we want to avoid executing a separate uninstall
                # task in that case.
                slot_node = self._dynamic_config.mydbapi[uninst_task.root
                    ].match_pkgs(uninst_task.slot_atom)
                # NOTE(review): the start of this condition is elided
                # in this listing.
                    slot_node[0].operation == "merge":
                    mygraph.add(slot_node[0], uninst_task,
                        priority=BlockerDepPriority.instance)

                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                # NOTE(review): prefer_asap reset / continue elided here.
                drop_satisfied = False

        if not selected_nodes:
            # Only select root nodes as a last resort. This case should
            # only trigger when the graph is nearly empty and the only
            # remaining nodes are isolated (no parents or children). Since
            # the nodes must be isolated, ignore_priority is not needed.
            selected_nodes = get_nodes()

        if not selected_nodes and not drop_satisfied:
            drop_satisfied = True
            # NOTE(review): continue elided.

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # If possible, drop an uninstall task here in order to avoid
            # the circular deps code path. The corresponding blocker will
            # still be counted as an unresolved conflict.
            # NOTE(review): ``uninst_task`` reset elided here.
            for node in myblocker_uninstalls.leaf_nodes():
                # NOTE(review): try-header elided in this listing.
                mygraph.remove(node)
                # NOTE(review): except/else handling elided here.
                ignored_uninstall_tasks.add(node)
                # NOTE(review): break elided.

            if uninst_task is not None:
                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                # NOTE(review): prefer_asap reset / continue elided here.
                drop_satisfied = False

        if not selected_nodes:
            self._dynamic_config._circular_deps_for_display = mygraph
            raise self._unknown_internal_error()

        # At this point, we've succeeded in selecting one or more nodes, so
        # reset state variables for leaf node selection.
        # NOTE(review): prefer_asap reset elided here.
        drop_satisfied = False

        mygraph.difference_update(selected_nodes)

        for node in selected_nodes:
            if isinstance(node, Package) and \
                node.operation == "nomerge":
                # NOTE(review): continue elided.

            # Handle interactions between blockers
            # and uninstallation tasks.
            solved_blockers = set()
            # NOTE(review): ``uninst_task`` reset elided here.
            if isinstance(node, Package) and \
                "uninstall" == node.operation:
                have_uninstall_task = True
                # NOTE(review): uninst_task assignment / else-branch
                # header elided in this listing.
                vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
                inst_pkg = vardb.match_pkgs(node.slot_atom)
                # NOTE(review): empty-match guard elided here.
                # The package will be replaced by this one, so remove
                # the corresponding Uninstall task if necessary.
                inst_pkg = inst_pkg[0]
                uninst_task = Package(built=inst_pkg.built,
                    cpv=inst_pkg.cpv, installed=inst_pkg.installed,
                    metadata=inst_pkg.metadata,
                    operation="uninstall",
                    root_config=inst_pkg.root_config,
                    type_name=inst_pkg.type_name)
                # NOTE(review): try-header elided in this listing.
                mygraph.remove(uninst_task)
                # NOTE(review): except handler elided in this listing.

            if uninst_task is not None and \
                uninst_task not in ignored_uninstall_tasks and \
                myblocker_uninstalls.contains(uninst_task):
                blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
                myblocker_uninstalls.remove(uninst_task)
                # Discard any blockers that this Uninstall solves.
                for blocker in blocker_nodes:
                    if not myblocker_uninstalls.child_nodes(blocker):
                        myblocker_uninstalls.remove(blocker)
                        # NOTE(review): the start of this membership
                        # test is elided in this listing.
                        self._dynamic_config._unsolvable_blockers:
                            solved_blockers.add(blocker)

            retlist.append(node)

            if (isinstance(node, Package) and \
                "uninstall" == node.operation) or \
                (uninst_task is not None and \
                uninst_task in scheduled_uninstalls):
                # Include satisfied blockers in the merge list
                # since the user might be interested and also
                # it serves as an indicator that blocking packages
                # will be temporarily installed simultaneously.
                for blocker in solved_blockers:
                    retlist.append(blocker)

    unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
    for node in myblocker_uninstalls.root_nodes():
        unsolvable_blockers.add(node)

    # If any Uninstall tasks need to be executed in order
    # to avoid a conflict, complete the graph with any
    # dependencies that may have been initially
    # neglected (to ensure that unsafe Uninstall tasks
    # are properly identified and blocked from execution).
    if have_uninstall_task and \
        # NOTE(review): a condition line is elided in this listing.
        not unsolvable_blockers:
        self._dynamic_config.myparams["complete"] = True
        if '--debug' in self._frozen_config.myopts:
            # NOTE(review): ``msg`` initialization elided here.
            msg.append("enabling 'complete' depgraph mode " + \
                "due to uninstall task(s):")
            for node in retlist:
                if isinstance(node, Package) and \
                    node.operation == 'uninstall':
                    msg.append("\t%s" % (node,))
            writemsg_level("\n%s\n" % \
                "".join("%s\n" % line for line in msg),
                level=logging.DEBUG, noiselevel=-1)
        raise self._serialize_tasks_retry("")

    # Set satisfied state on blockers, but not before the
    # above retry path, since we don't want to modify the
    # state in that case.
    for node in retlist:
        if isinstance(node, Blocker):
            node.satisfied = True

    for blocker in unsolvable_blockers:
        retlist.append(blocker)

    if unsolvable_blockers and \
        not self._accept_blocker_conflicts():
        self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    if self._dynamic_config._slot_collision_info and \
        not self._accept_blocker_conflicts():
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Report a circular-dependency failure for *mygraph*: force a
    verbose tree display of the handler's merge list, print the
    cycle (plus a debug dump when appropriate), then print any
    suggested changes that could break the cycle.

    NOTE(review): the original line numbers in this listing jump;
    several branch headers and a call continuation are elided.
    """
    self._dynamic_config._circular_dependency_handler = \
        circular_dependency_handler(self, mygraph)
    handler = self._dynamic_config._circular_dependency_handler

    # Force verbose tree output so the cycle is actually visible.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts["--verbose"] = True
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(handler.merge_list)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
        # NOTE(review): call continuation elided in this listing.
    portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is None or \
        "--debug" in self._frozen_config.myopts:
        handler.debug_print()
        portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is not None:
        portage.writemsg(handler.circular_dep_message, noiselevel=-1)

    suggestions = handler.suggestions
    # NOTE(review): a guard on ``suggestions`` is elided here.
    writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
    if len(suggestions) == 1:
        writemsg("by applying the following change:\n", noiselevel=-1)
        # NOTE(review): else-branch header elided in this listing.
        writemsg("by applying " + colorize("bold", "any of") + \
            " the following changes:\n", noiselevel=-1)
    writemsg("".join(suggestions), noiselevel=-1)
    writemsg("\nNote that this change can be reverted, once the package has" + \
        " been installed.\n", noiselevel=-1)
    if handler.large_cycle_count:
        writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
            "Several changes might be required to resolve all cycles.\n" + \
            "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
    # NOTE(review): else-branch header (no-suggestions case) elided.
    writemsg("\n\n", noiselevel=-1)
    writemsg(prefix + "Note that circular dependencies " + \
        "can often be avoided by temporarily\n", noiselevel=-1)
    writemsg(prefix + "disabling USE flags that trigger " + \
        "optional dependencies.\n", noiselevel=-1)
4562 def _show_merge_list(self):
4563 if self._dynamic_config._serialized_tasks_cache is not None and \
4564 not (self._dynamic_config._displayed_list and \
4565 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
4566 self._dynamic_config._displayed_list == \
4567 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
4568 display_list = self._dynamic_config._serialized_tasks_cache[:]
4569 if "--tree" in self._frozen_config.myopts:
4570 display_list.reverse()
4571 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Report blocker conflicts that could not be resolved: show the
    merge list, print an error banner, then list each conflicting
    package together with (a bounded number of) the parents/atoms
    that pulled it in.

    @param blockers: iterable of Blocker nodes to report

    NOTE(review): the original line numbers in this listing jump;
    several initializations, guards and break/continue statements
    are elided — confirm against upstream before editing.
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in textwrap.wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    # display.
    # NOTE(review): ``conflict_pkgs`` initialization elided here.
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@selected", atom)])
            # NOTE(review): a guard on ``parent_atoms`` is elided here.
            conflict_pkgs[pkg] = parent_atoms

    # NOTE(review): an enclosing guard on ``conflict_pkgs`` is elided.
    # Reduce noise by pruning packages that are only
    # pulled in by other conflict packages.
    # NOTE(review): ``pruned_pkgs`` initialization elided here.
    for pkg, parent_atoms in conflict_pkgs.items():
        relevant_parent = False
        for parent, atom in parent_atoms:
            if parent not in conflict_pkgs:
                relevant_parent = True
                # NOTE(review): break elided.
        if not relevant_parent:
            pruned_pkgs.add(pkg)
    for pkg in pruned_pkgs:
        del conflict_pkgs[pkg]

    # NOTE(review): initializations of ``msg`` (list), ``indent`` and
    # ``max_parents`` are elided in this listing.
    # Max number of parents shown, to avoid flooding the display.
    for pkg, parent_atoms in conflict_pkgs.items():
        # NOTE(review): ``pruned_list`` initialization elided here.

        # Prefer packages that are not directly involved in a conflict.
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
                # NOTE(review): break elided.
            parent, atom = parent_atom
            if parent not in conflict_pkgs:
                pruned_list.add(parent_atom)

        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
                # NOTE(review): break elided.
            pruned_list.add(parent_atom)

        omitted_parents = len(parent_atoms) - len(pruned_list)
        msg.append(indent + "%s pulled in by\n" % pkg)

        for parent_atom in pruned_list:
            parent, atom = parent_atom
            msg.append(2*indent)
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(str(parent))
                # NOTE(review): else-branch header elided here.
                # Display the specific atom from SetArg or
                # Package types.
                msg.append("%s required by %s" % (atom, parent))

        # NOTE(review): a guard on ``omitted_parents`` is elided here.
        msg.append(2*indent)
        msg.append("(and %d more)\n" % omitted_parents)

    sys.stderr.write("".join(msg))

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
    """
    Display the given merge list via the module-level display()
    output routine.

    @param mylist: list of tasks (Package/Blocker instances) to show
    @param favorites: optional list of favorite atoms passed through
        to the output routine; defaults to an empty list. A ``None``
        sentinel replaces the previous mutable default argument
        (``favorites=[]``), which is a well-known Python pitfall —
        the default list object would be shared across calls.
    @param verbosity: optional verbosity override passed through
    """
    if favorites is None:
        favorites = []

    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist

    # Resolves to the module-level display() function (not this
    # method), since this is a plain global-name lookup.
    return display(self, mylist, favorites, verbosity)
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.

    NOTE(review): the original line numbers in this listing jump; the
    statements that save and (in a finally clause) restore the real
    sys.stdout around the redirect below appear to be elided.
    """

    # Note that show_masked_packages() sends its output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    # NOTE(review): stdout save / try-header elided in this listing.
    sys.stdout = sys.stderr
    self._display_problems()
    # NOTE(review): finally-clause restoring sys.stdout elided here.

    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
4712 def _display_problems(self):
4713 if self._dynamic_config._circular_deps_for_display is not None:
4714 self._show_circular_deps(
4715 self._dynamic_config._circular_deps_for_display)
4717 # The user is only notified of a slot conflict if
4718 # there are no unresolvable blocker conflicts.
4719 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
4720 self._show_unsatisfied_blockers(
4721 self._dynamic_config._unsatisfied_blockers_for_display)
4722 elif self._dynamic_config._slot_collision_info:
4723 self._show_slot_collision_notice()
4725 self._show_missed_update()
4727 def get_dep_chain(pkg):
4728 traversed_nodes = set()
4733 all_parents = self._dynamic_config._parent_atoms
4734 while node is not None:
4735 traversed_nodes.add(node)
4736 if isinstance(node, DependencyArg):
4741 msg += _unicode_decode('required by %s') % (node,)
4742 elif node is not pkg:
4743 for ppkg, patom in all_parents[child]:
4745 atom = patom.unevaluated_atom
4749 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
4750 if priority.buildtime:
4751 dep_strings.add(node.metadata["DEPEND"])
4752 if priority.runtime:
4753 dep_strings.add(node.metadata["RDEPEND"])
4754 if priority.runtime_post:
4755 dep_strings.add(node.metadata["PDEPEND"])
4757 affecting_use = set()
4758 for dep_str in dep_strings:
4759 affecting_use.update(extract_affecting_use(dep_str, atom))
4761 #Don't show flags as 'affecting' if the user can't change them,
4762 pkgsettings = self._frozen_config.pkgsettings[node.root]
4763 pkgsettings.setcpv(node)
4764 affecting_use.difference_update(pkgsettings.usemask, \
4765 pkgsettings.useforce)
4770 for flag in affecting_use:
4771 if flag in self._pkg_use_enabled(node):
4774 usedep.append("-"+flag)
4775 pkg_name += "[%s]" % ",".join(usedep)
4781 msg += 'required by =%s' % pkg_name
4783 if node not in self._dynamic_config.digraph:
4784 # The parent is not in the graph due to backtracking.
4787 # When traversing to parents, prefer arguments over packages
4788 # since arguments are root nodes. Never traverse the same
4789 # package twice, in order to prevent an infinite loop.
4790 selected_parent = None
4791 for parent in self._dynamic_config.digraph.parent_nodes(node):
4792 if parent in traversed_nodes:
4794 if isinstance(parent, DependencyArg):
4795 if self._dynamic_config.digraph.parent_nodes(parent):
4796 selected_parent = parent
4803 msg += _unicode_decode(
4804 'required by %s (argument)') % (parent,)
4805 selected_parent = None
4808 selected_parent = parent
4810 node = selected_parent
4814 unstable_keyword_msg = []
4815 for pkg in self._dynamic_config._needed_unstable_keywords:
4816 self._show_merge_list()
4817 if pkg in self._dynamic_config.digraph:
4818 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4819 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
4820 use=self._pkg_use_enabled(pkg))
4821 for reason in mreasons:
4822 if reason.unmask_hint and \
4823 reason.unmask_hint.key == 'unstable keyword':
4824 keyword = reason.unmask_hint.value
4826 unstable_keyword_msg.append(get_dep_chain(pkg))
4827 unstable_keyword_msg.append("=%s %s\n" % (pkg.cpv, keyword))
4829 use_changes_msg = []
4830 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
4831 self._show_merge_list()
4832 if pkg in self._dynamic_config.digraph:
4833 changes = needed_use_config_change[1]
4835 for flag, state in changes.items():
4837 adjustments.append(flag)
4839 adjustments.append("-" + flag)
4840 use_changes_msg.append(get_dep_chain(pkg))
4841 use_changes_msg.append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
4844 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
4845 self._show_merge_list()
4846 if pkg in self._dynamic_config.digraph:
4847 license_msg.append(get_dep_chain(pkg))
4848 license_msg.append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
4850 if unstable_keyword_msg:
4851 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
4852 " are necessary to proceed:\n", noiselevel=-1)
4853 writemsg_stdout("".join(unstable_keyword_msg), noiselevel=-1)
4856 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
4857 " are necessary to proceed:\n", noiselevel=-1)
4858 writemsg_stdout("".join(use_changes_msg), noiselevel=-1)
4861 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
4862 " are necessary to proceed:\n", noiselevel=-1)
4863 writemsg_stdout("".join(license_msg), noiselevel=-1)
4865 # TODO: Add generic support for "set problem" handlers so that
4866 # the below warnings aren't special cases for world only.
4868 if self._dynamic_config._missing_args:
4869 world_problems = False
4870 if "world" in self._dynamic_config.sets[
4871 self._frozen_config.target_root].sets:
4872 # Filter out indirect members of world (from nested sets)
4873 # since only direct members of world are desired here.
4874 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
4875 for arg, atom in self._dynamic_config._missing_args:
4876 if arg.name in ("selected", "world") and atom in world_set:
4877 world_problems = True
4881 sys.stderr.write("\n!!! Problems have been " + \
4882 "detected with your world file\n")
4883 sys.stderr.write("!!! Please run " + \
4884 green("emaint --check world")+"\n\n")
4886 if self._dynamic_config._missing_args:
4887 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4888 " Ebuilds for the following packages are either all\n")
4889 sys.stderr.write(colorize("BAD", "!!!") + \
4890 " masked or don't exist:\n")
4891 sys.stderr.write(" ".join(str(atom) for arg, atom in \
4892 self._dynamic_config._missing_args) + "\n")
4894 if self._dynamic_config._pprovided_args:
4896 for arg, atom in self._dynamic_config._pprovided_args:
4897 if isinstance(arg, SetArg):
4899 arg_atom = (atom, atom)
4902 arg_atom = (arg.arg, atom)
4903 refs = arg_refs.setdefault(arg_atom, [])
4904 if parent not in refs:
4907 msg.append(bad("\nWARNING: "))
4908 if len(self._dynamic_config._pprovided_args) > 1:
4909 msg.append("Requested packages will not be " + \
4910 "merged because they are listed in\n")
4912 msg.append("A requested package will not be " + \
4913 "merged because it is listed in\n")
4914 msg.append("package.provided:\n\n")
4915 problems_sets = set()
4916 for (arg, atom), refs in arg_refs.items():
4919 problems_sets.update(refs)
4921 ref_string = ", ".join(["'%s'" % name for name in refs])
4922 ref_string = " pulled in by " + ref_string
4923 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
4925 if "selected" in problems_sets or "world" in problems_sets:
4926 msg.append("This problem can be solved in one of the following ways:\n\n")
4927 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
4928 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
4929 msg.append(" C) Remove offending entries from package.provided.\n\n")
4930 msg.append("The best course of action depends on the reason that an offending\n")
4931 msg.append("package.provided entry exists.\n\n")
4932 sys.stderr.write("".join(msg))
4934 masked_packages = []
4935 for pkg in self._dynamic_config._masked_license_updates:
4936 root_config = pkg.root_config
4937 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4938 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
4939 masked_packages.append((root_config, pkgsettings,
4940 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
4942 writemsg("\n" + colorize("BAD", "!!!") + \
4943 " The following updates are masked by LICENSE changes:\n",
4945 show_masked_packages(masked_packages)
4947 writemsg("\n", noiselevel=-1)
4949 masked_packages = []
4950 for pkg in self._dynamic_config._masked_installed:
4951 root_config = pkg.root_config
4952 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4953 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
4954 masked_packages.append((root_config, pkgsettings,
4955 pkg.cpv, "installed", pkg.metadata, mreasons))
4957 writemsg("\n" + colorize("BAD", "!!!") + \
4958 " The following installed packages are masked:\n",
4960 show_masked_packages(masked_packages)
4962 writemsg("\n", noiselevel=-1)
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# Options under which the world file must never be modified: bail
	# out before touching the "selected" set.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self._frozen_config.myopts:
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	world_set = root_config.sets["selected"]
	world_locked = False
	# The world set may be backed by a file that supports locking and
	# reloading; use those capabilities when available.
	if hasattr(world_set, "lock"):
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	args_set = self._dynamic_config.sets[
		self._frozen_config.target_root].sets['__non_set_args__']
	portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
	added_favorites = set()
	# Collect world-file candidate atoms from the graph nodes that
	# correspond to requested arguments ("set nodes").
	for x in self._dynamic_config._set_nodes:
		pkg_type = x.type_name
		pkg_status = x.operation
		if pkg_status != "nomerge":
		myfavkey = create_world_atom(x, args_set, root_config)
		if myfavkey in added_favorites:
		added_favorites.add(myfavkey)
	except portage.exception.InvalidDependString as e:
		# Best-effort: report the broken PROVIDE metadata and continue.
		writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
			(pkg_key, str(e)), noiselevel=-1)
		writemsg("!!! see '%s'\n\n" % os.path.join(
			root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
	# Record requested package sets (by SETPREFIX reference) except the
	# world/selected sets themselves and sets that are not world
	# candidates.
	for arg in self._dynamic_config._initial_arg_list:
		if not isinstance(arg, SetArg):
		if k in ("selected", "world") or \
			not root_config.sets[k].world_candidate:
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
		colorize("INFORM", str(a)), noiselevel=-1)
	world_set.update(all_added)
def _loadResumeCommand(self, resume_data, skip_masked=True,
	"""
	Add a resume command to the graph and validate it in the process.  This
	will raise a PackageNotFound exception if a package is not available.
	"""
	# Validate the overall shape of the resume data before using it.
	if not isinstance(resume_data, dict):
	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):
	favorites = resume_data.get("favorites")
	args_set = self._dynamic_config.sets[
		self._frozen_config.target_root].sets['__non_set_args__']
	if isinstance(favorites, list):
		args = self._load_favorites(favorites)
	fakedb = self._dynamic_config.mydbapi
	trees = self._frozen_config.trees
	serialized_tasks = []
		# Each mergelist entry is a 4-tuple serialized as a list:
		# (pkg_type, root, cpv, action). Skip malformed entries and
		# anything that is not a "merge" action.
		if not (isinstance(x, list) and len(x) == 4):
		pkg_type, myroot, pkg_key, action = x
		if pkg_type not in self.pkg_tree_map:
		if action != "merge":
		root_config = self._frozen_config.roots[myroot]

		# Use the resume "favorites" list to see if a repo was specified
		depgraph_sets = self._dynamic_config.sets[root_config.root]
		for atom in depgraph_sets.atoms.getAtoms():
			if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
		atom = "=" + pkg_key
			atom = atom + _repo_separator + repo
			atom = Atom(atom, allow_repo=True)
		# Re-select a matching package, skipping anything invisible
		# or excluded by --exclude.
		for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
			if not self._pkg_visibility_check(pkg) or \
				self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
				modified_use=self._pkg_use_enabled(pkg)):
			# It does not exist or it is corrupt.
			# TODO: log these somewhere
			raise portage.exception.PackageNotFound(pkg_key)
		if "merge" == pkg.operation and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
			modified_use=self._pkg_use_enabled(pkg)):
		if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
			# Masked packages become masked_tasks; depending on
			# skip_masked they are either displayed or raised later.
			masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
			self._dynamic_config._unsatisfied_deps_for_display.append(
				((pkg.root, "="+pkg.cpv), {"myparent":None}))
		fakedb[myroot].cpv_inject(pkg)
		serialized_tasks.append(pkg)
		self._spinner_update()
	if self._dynamic_config._unsatisfied_deps_for_display:
	if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
		# With --nodeps (or an empty list) the serialized tasks are
		# used directly, without rebuilding the dependency graph.
		self._dynamic_config._serialized_tasks_cache = serialized_tasks
		self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
		# Restrict package selection to what is already in the graph.
		self._select_package = self._select_pkg_from_graph
		self._dynamic_config.myparams["selective"] = True
		# Always traverse deep dependencies in order to account for
		# potentially unsatisfied dependencies of installed packages.
		# This is necessary for correct --keep-going or --resume operation
		# in case a package from a group of circularly dependent packages
		# fails. In this case, a package which has recently been installed
		# may have an unsatisfied circular dependency (pulled in by
		# PDEPEND, for example). So, even though a package is already
		# installed, it may not have all of it's dependencies satisfied, so
		# it may not be usable. If such a package is in the subgraph of
		# deep depenedencies of a scheduled build, that build needs to
		# be cancelled. In order for this type of situation to be
		# recognized, deep traversal of dependencies is required.
		self._dynamic_config.myparams["deep"] = True
		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):
		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
		for arg in self._expand_set_args(args, add_to_digraph=True):
			for atom in arg.pset.getAtoms():
				pkg, existing_node = self._select_package(
					arg.root_config.root, atom)
				if existing_node is None and \
					if not self._add_pkg(pkg, Dependency(atom=atom,
						root=pkg.root, parent=arg)):
		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		if not self._create_graph(allow_unsatisfied=True):
		unsatisfied_deps = []
		for dep in self._dynamic_config._unsatisfied_deps:
			if not isinstance(dep.parent, Package):
			if dep.parent.operation == "merge":
				unsatisfied_deps.append(dep)
			# For unsatisfied deps of installed packages, only account for
			# them if they are in the subgraph of dependencies of a package
			# which is scheduled to be installed.
			unsatisfied_install = False
			dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
				node = dep_stack.pop()
				if not isinstance(node, Package):
				if node.operation == "merge":
					unsatisfied_install = True
				if node in traversed:
				dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
			if unsatisfied_install:
				unsatisfied_deps.append(dep)
		if masked_tasks or unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + unsatisfied_deps)
		self._dynamic_config._serialized_tasks_cache = None
	except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	sets = root_config.sets
	depgraph_sets = self._dynamic_config.sets[root_config.root]
		# Skip non-string entries (py2 basestring covers str/unicode).
		if not isinstance(x, basestring):
		if x in ("system", "world"):
		if x.startswith(SETPREFIX):
			# A named set reference, e.g. "@security".
			s = x[len(SETPREFIX):]
			if s in depgraph_sets.sets:
			depgraph_sets.sets[s] = pset
			args.append(SetArg(arg=x, pset=pset,
				root_config=root_config))
			# Plain atoms; repo-qualified atoms are allowed.
			x = Atom(x, allow_repo=True)
		except portage.exception.InvalidAtom:
		args.append(AtomArg(arg=x, atom=x,
			root_config=root_config))
	self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so callers can inspect the
		# state that produced this failure.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""

	def __init__(self, value=""):
		super(_internal_exception, self).__init__(value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _backtrack_mask(_internal_exception):
	"""
	This is raised by _show_unsatisfied_dep() when it's called with
	check_backtrack=True and a matching package has been masked by
	"""
def need_restart(self):
	"""Return the _need_restart flag from the dynamic config."""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_restart
def success_without_autounmask(self):
	"""Return the _success_without_autounmask flag from the dynamic config."""
	dynamic_config = self._dynamic_config
	return dynamic_config._success_without_autounmask
def get_backtrack_infos(self):
	"""Return the accumulated backtrack information from the dynamic config."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	"""

	def __init__(self, depgraph, root):
		dbapi.__init__(self)
		self._depgraph = depgraph
		# Per-instance caches; both invalidated together via _clear_cache().
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Drop all memoized match() results and cpv -> Package mappings.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def match(self, atom):
		# Serve repeated queries for the same atom from the cache.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
			# Select one package per remaining slot.
			slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		# Cache under the original (pre-expansion) atom.
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Decide whether pkg should be exposed through this composite view.
		if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
			arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
		except (StopIteration, portage.exception.InvalidDependString):
		if pkg.installed and not self._depgraph._pkg_visibility_check(pkg):
		in_graph = self._depgraph._dynamic_config._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			# Note: highest_visible is not necessarily the real highest
			# visible, especially when --update is not enabled, so use
			# < operator instead of !=.
			if pkg < highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		"""
		# NOTE(review): this reads self._depgraph.roots — other code in
		# this file goes through _frozen_config.roots; confirm the
		# depgraph actually exposes a 'roots' attribute.
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer non-virtual candidates when the expansion is
			# ambiguous.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not x.cp.startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[x.cp for x in expanded_atoms])
			atom = expanded_atoms[0]
			null_atom = Atom(insert_category_into_atom(atom, "null"))
			cat, atom_pn = portage.catsplit(null_atom.cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
			# Allow the resolver to choose which virtual.
			atom = Atom(null_atom.replace('null/', 'virtual/', 1))

	def aux_get(self, cpv, wants):
		# Serve metadata from the cpv -> Package map populated by match();
		# missing keys yield empty strings.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def match_pkgs(self, atom):
		# Map matched cpvs back to their Package instances.
		return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Report that a short package name matched multiple categories and
	list the fully-qualified alternatives."""
	if "--quiet" in myopts:
		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
	# Non-quiet mode: reuse the search machinery to show details for
	# each candidate.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def insert_category_into_atom(atom, category):
	"""Insert "category/" into an atom string that lacks a category."""
	# Find the first word character; the category is inserted just before
	# it so any leading operator prefix (e.g. ">=") is preserved.
	alphanum = re.search(r'\w', atom)
	ret = atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
def _spinner_start(spinner, myopts):
	"""Print the pretend/ask header when appropriate and start the
	"Calculating dependencies" spinner."""
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		# Choose the action word for the header message.
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:
		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			if "--unordered-display" in myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s:" % action) + "\n\n")
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be %s, in reverse order:" % action) + "\n\n")
		portage.writemsg_stdout("\n" + \
			darkgreen("These are the packages that " + \
			"would be %s, in order:" % action) + "\n\n")
	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		# Silence the spinner by swapping in its quiet update method.
		spinner.update = spinner.update_quiet
	portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
	"""Finish the "Calculating dependencies" spinner output."""
	# Nothing to do when the spinner is absent or was silenced by
	# _spinner_start() (update bound to update_quiet).
	if spinner is None or \
		spinner.update is spinner.update_quiet:
	portage.writemsg_stdout("\b\b... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# Wrap the real work with spinner start/stop.
	_spinner_start(spinner, myopts)
	return _backtrack_depgraph(settings, trees, myopts, myparams,
		myaction, myfiles, spinner)
	_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
	"""Build a depgraph, retrying with backtrack parameters on failure.

	Returns a (success, depgraph, favorites) tuple.
	"""
	# --backtrack gives the maximum number of backtracking attempts;
	# 0 disables backtracking entirely.
	max_depth = myopts.get('--backtrack', 5)
	allow_backtracking = max_depth > 0
	backtracker = Backtracker(max_depth)
	frozen_config = _frozen_depgraph_config(settings, trees,
	backtrack_parameters = backtracker.get()
	mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
		frozen_config=frozen_config,
		allow_backtracking=allow_backtracking,
		backtrack_parameters=backtrack_parameters)
	success, favorites = mydepgraph.select_files(myfiles)
	if success or mydepgraph.success_without_autounmask():
	elif mydepgraph.need_restart():
		# Feed the failure information back so the next attempt can
		# try different parameters.
		backtracker.feedback(mydepgraph.get_backtrack_infos())
	if not (success or mydepgraph.success_without_autounmask()) and backtracker.backtracked():
		# All attempts failed: fall back to the best run found, with
		# backtracking disabled, so its problems can be displayed.
		backtrack_parameters = backtracker.get_best_run()
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=False,
			backtrack_parameters=backtrack_parameters)
		success, favorites = mydepgraph.select_files(myfiles)
	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# Wrap the real work with spinner start/stop.
	_spinner_start(spinner, myopts)
	return _resume_depgraph(settings, trees, mtimedb, myopts,
	_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	frozen_config = _frozen_depgraph_config(settings, trees,
	mydepgraph = depgraph(settings, trees,
		myopts, myparams, spinner, frozen_config=frozen_config)
	success = mydepgraph._loadResumeCommand(mtimedb["resume"],
		skip_masked=skip_masked)
	except depgraph.UnsatisfiedResumeDep as e:
		if not skip_unsatisfied:
		# Prune the mergelist: drop every package whose removal is
		# forced by the unsatisfied dependencies, propagating upward
		# through the graph.
		graph = mydepgraph._dynamic_config.digraph
		unsatisfied_parents = dict((dep.parent, dep.parent) \
		traversed_nodes = set()
		unsatisfied_stack = list(unsatisfied_parents)
		while unsatisfied_stack:
			pkg = unsatisfied_stack.pop()
			if pkg in traversed_nodes:
			traversed_nodes.add(pkg)
			# If this package was pulled in by a parent
			# package scheduled for merge, removing this
			# package may cause the parent package's
			# dependency to become unsatisfied.
			for parent_node in graph.parent_nodes(pkg):
				if not isinstance(parent_node, Package) \
					or parent_node.operation not in ("merge", "nomerge"):
				graph.child_nodes(parent_node,
					ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
				if pkg in unsatisfied:
					unsatisfied_parents[parent_node] = parent_node
					unsatisfied_stack.append(parent_node)
		pruned_mergelist = []
			if isinstance(x, list) and \
				tuple(x) not in unsatisfied_parents:
				pruned_mergelist.append(x)
		# If the mergelist doesn't shrink then this loop is infinite.
		if len(pruned_mergelist) == len(mergelist):
			# This happens if a package can't be dropped because
			# it's already installed, but it has unsatisfied PDEPEND.
		mergelist[:] = pruned_mergelist
		# Exclude installed packages that have been removed from the graph due
		# to failure to build/install runtime dependencies after the dependent
		# package has already been installed.
		dropped_tasks.update(pkg for pkg in \
			unsatisfied_parents if pkg.operation != "nomerge")
		mydepgraph.break_refs(unsatisfied_parents)
		# Release references before the retry iteration.
		del e, graph, traversed_nodes, \
			unsatisfied_parents, unsatisfied_stack
	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
	"""Fetch metadata for cpv and compute its masking reasons.

	Returns a (metadata, mreasons) tuple; metadata is None when the
	entry could not be read (treated as corruption).
	"""
	metadata = dict(zip(db_keys,
		db.aux_get(cpv, db_keys, myrepo=myrepo)))
	if metadata is None:
		mreasons = ["corruption"]
	eapi = metadata['EAPI']
	if not portage.eapi_is_supported(eapi):
		mreasons = ['EAPI %s' % eapi]
	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	# Honor any USE overrides the resolver has applied when available.
	if _pkg_use_enabled is not None:
		modified_use = _pkg_use_enabled(pkg)
	mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print a report for each masked package, including mask comments
	and license file locations.

	Returns True if any package was masked due to an unsupported EAPI.
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv, repo,
		metadata, mreasons) in masked_packages:
		output_cpv += _repo_separator + repo
		if output_cpv in shown_cpvs:
		shown_cpvs.add(output_cpv)
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not portage.eapi_is_supported(metadata["EAPI"]):
			have_eapi_mask = True
			missing_licenses = \
				pkgsettings._getMissingLicenses(
		except portage.exception.InvalidDependString:
			# This will have already been reported
			# above via mreasons.
		writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
		# Show each package.mask comment only once.
		if comment and comment not in shown_comments:
			writemsg_stdout(filename + ":\n" + comment + "\n",
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		# Point the user at the license files they still need to accept.
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg_stdout(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation covering masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for doc_line in doc_lines:
		writemsg_stdout(doc_line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a pointer to the handbook section about blocked packages."""
	for line in (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	):
		writemsg(line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return only the human-readable messages from _get_masking_status()."""
	messages = []
	for reason in _get_masking_status(pkg, pkgsettings,
		root_config, myrepo=myrepo, use=use):
		messages.append(reason.message)
	return messages
5738 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
5739 mreasons = _getmaskingstatus(
5740 pkg, settings=pkgsettings,
5741 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
5743 if not pkg.installed:
5744 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5745 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
5746 pkg.metadata["CHOST"]))
5748 if pkg.metadata["REQUIRED_USE"] and \
5749 eapi_has_required_use(pkg.metadata["EAPI"]):
5750 required_use = pkg.metadata["REQUIRED_USE"]
5752 use = pkg.use.enabled
5754 required_use_is_sat = check_required_use(
5755 required_use, use, pkg.iuse.is_valid_flag)
5756 except portage.exception.InvalidDependString:
5757 mreasons.append(_MaskReason("invalid", "invalid: REQUIRED_USE"))
5759 if not required_use_is_sat:
5760 msg = "violated use flag constraints: '%s'" % required_use
5761 mreasons.append(_MaskReason("REQUIRED_USE", "REQUIRED_USE violated"))
5764 for msg_type, msgs in pkg.invalid.items():
5767 _MaskReason("invalid", "invalid: %s" % (msg,)))
5769 if not pkg.metadata["SLOT"]:
5771 _MaskReason("invalid", "SLOT: undefined"))