1 # Copyright 1999-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
11 from itertools import chain
14 from portage import os
15 from portage import _unicode_decode
16 from portage.const import PORTAGE_PACKAGE_ATOM
17 from portage.dbapi import dbapi
18 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
19 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
20 from portage.exception import InvalidAtom
21 from portage.output import colorize, create_color_func, \
23 bad = create_color_func("BAD")
24 from portage.package.ebuild.getmaskingstatus import \
25 _getmaskingstatus, _MaskReason
26 from portage._sets import SETPREFIX
27 from portage._sets.base import InternalPackageSet
28 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
29 from portage.util import writemsg_level
30 from portage.util.digraph import digraph
32 from _emerge.AtomArg import AtomArg
33 from _emerge.Blocker import Blocker
34 from _emerge.BlockerCache import BlockerCache
35 from _emerge.BlockerDepPriority import BlockerDepPriority
36 from _emerge.countdown import countdown
37 from _emerge.create_world_atom import create_world_atom
38 from _emerge.Dependency import Dependency
39 from _emerge.DependencyArg import DependencyArg
40 from _emerge.DepPriority import DepPriority
41 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
42 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
43 from _emerge.FakeVartree import FakeVartree
44 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
45 from _emerge.is_valid_package_atom import is_valid_package_atom
46 from _emerge.Package import Package
47 from _emerge.PackageArg import PackageArg
48 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
49 from _emerge.RootConfig import RootConfig
50 from _emerge.search import search
51 from _emerge.SetArg import SetArg
52 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
53 from _emerge.UnmergeDepPriority import UnmergeDepPriority
55 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
56 from _emerge.resolver.slot_collision import slot_conflict_handler
57 from _emerge.resolver.circular_dependency import circular_dependency_handler
58 from _emerge.resolver.output import display, filter_iuse_defaults
60 if sys.hexversion >= 0x3000000:
class _scheduler_graph_config(object):
    """
    Lightweight container handed to the Scheduler after a depgraph run,
    bundling the state it needs (Package instance cache and the ordered
    merge list, among other structures received by the constructor).
    """
    def __init__(self, trees, pkg_cache, graph, mergelist):
        # Shared cache of Package instances (see "All Package instances"
        # cache in _frozen_depgraph_config).
        self.pkg_cache = pkg_cache
        # Ordered list of merge/uninstall tasks produced by the resolver.
        self.mergelist = mergelist
class _frozen_depgraph_config(object):
    """
    Depgraph state that is computed once and may be shared across a
    series of backtracking runs: settings, per-root trees (with each
    real vartree replaced by a FakeVartree), per-root pkgsettings, and
    the set of packages excluded via --exclude.
    """

    def __init__(self, settings, trees, myopts, spinner):
        self.settings = settings
        self.target_root = settings["ROOT"]
        if settings.get("PORTAGE_DEBUG", "") == "1":
        self.spinner = spinner
        # Root config of the root emerge itself runs on ("/").
        self._running_root = trees["/"]["root_config"]
        # With any of these options there is no point in restarting
        # (backtracking) the merge for a better result.
        self._opts_no_restart = frozenset(["--buildpkgonly",
            "--fetchonly", "--fetch-all-uri", "--pretend"])
        # Keep a reference to the unmodified trees dict; self.trees
        # (built below) substitutes a FakeVartree for each vartree.
        self._trees_orig = trees
        # All Package instances
        self._highest_license_masked = {}
            self.trees[myroot] = {}
            # Create a RootConfig instance that references
            # the FakeVartree instead of the real one.
            self.roots[myroot] = RootConfig(
                trees[myroot]["vartree"].settings,
                trees[myroot]["root_config"].setconfig)
            for tree in ("porttree", "bintree"):
                self.trees[myroot][tree] = trees[myroot][tree]
            self.trees[myroot]["vartree"] = \
                FakeVartree(trees[myroot]["root_config"],
                    pkg_cache=self._pkg_cache,
                    pkg_root_config=self.roots[myroot])
            # Clone settings from the FakeVartree so that
            # FakeVartree.sync()-populated virtuals are reflected.
            self.pkgsettings[myroot] = portage.config(
                clone=self.trees[myroot]["vartree"].settings)

        self._required_set_names = set(["world"])

        # Atoms excluded on the command line; a bare name that fails to
        # parse as an atom is retried as a "*/name" wildcard.
        self.excluded_pkgs = InternalPackageSet(allow_wildcard=True)
        for x in ' '.join(myopts.get("--exclude", [])).split():
                x = Atom(x, allow_wildcard=True)
            except portage.exception.InvalidAtom:
                x = Atom("*/" + x, allow_wildcard=True)
            self.excluded_pkgs.add(x)
class _depgraph_sets(object):
    """
    Per-root tracking of the package sets and argument atoms that have
    been pulled into the graph.
    """
        # contains all sets added to the graph
        # contains non-set atoms given as arguments
        self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
        # contains all atoms from all sets added to the graph, including
        # atoms given as arguments
        self.atoms = InternalPackageSet(allow_repo=True)
        # NOTE(review): presumably maps each atom back to the argument(s)
        # that supplied it -- confirm against _iter_atoms_for_pkg() usage.
        self.atom_arg_map = {}
class _dynamic_depgraph_config(object):
    """
    Per-run (backtrackable) depgraph state, in contrast with
    _frozen_depgraph_config which is shared across backtracking runs.
    Holds the dependency digraph itself plus the bookkeeping used for
    blocker handling, slot-conflict detection, autounmask and
    backtracking.
    """

    def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
        # Copy so that later parameter mutations don't leak to the caller.
        self.myparams = myparams.copy()
        self._vdb_loaded = False
        self._allow_backtracking = allow_backtracking
        # Maps slot atom to package for each Package added to the graph.
        self._slot_pkg_map = {}
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        #contains the args created by select_files
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # manages sets added to the graph
        # contains all nodes pulled in by self.sets
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        self._blocked_pkgs = digraph()
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        # Contains packages whose dependencies have been traversed.
        # This use used to check if we have accounted for blockers
        # relevant to a package.
        self._traversed_pkg_deps = set()
        self._slot_collision_info = {}
        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        self._slot_collision_nodes = set()
        self._parent_atoms = {}
        self._slot_conflict_parent_atoms = set()
        self._slot_conflict_handler = None
        self._circular_dependency_handler = None
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._masked_license_updates = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}

        # State carried over from the previous (failed) run by the
        # backtracker.
        self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
        self._needed_license_changes = backtrack_parameters.needed_license_changes
        self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
        self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
        self._need_restart = False
        self._backtrack_infos = {}

        # NOTE(review): the option value is compared with the True object,
        # so only an exact boolean True enables autounmask here -- the
        # default 'n' string is falsy for this purpose by design.
        self._autounmask = depgraph._frozen_config.myopts.get('--autounmask', 'n') == True
        self._success_without_autounmask = False

        for myroot in depgraph._frozen_config.trees:
            self.sets[myroot] = _depgraph_sets()
            self._slot_pkg_map[myroot] = {}
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # This dbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageVirtualDbapi(vardb.settings)
            self.mydbapi[myroot] = fakedb
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["vartree"] = \
                depgraph._frozen_config.trees[myroot]["vartree"]

            # Each entry is a tuple:
            # (db, pkg_type, built, installed, db_keys)
            if "remove" in self.myparams:
                # For removal operations, use _dep_check_composite_db
                # for availability and visibility checks. This provides
                # consistency with install operations, so we don't
                # get install/uninstall cycles like in bug #332719.
                self._graph_trees[myroot]["porttree"] = filtered_tree
            if "--usepkgonly" not in depgraph._frozen_config.myopts:
                portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
                db_keys = list(portdb._aux_cache_keys)
                dbs.append((portdb, "ebuild", False, False, db_keys))
            if "--usepkg" in depgraph._frozen_config.myopts:
                bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
                db_keys = list(bindb._aux_cache_keys)
                dbs.append((bindb, "binary", True, False, db_keys))
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
    """
    Resolves requested atoms/sets into an ordered dependency graph of
    packages to merge, handling blockers, slot conflicts and (optional)
    backtracking.
    """

    pkg_tree_map = RootConfig.pkg_tree_map

    # Dependency metadata variables consulted when expanding a package.
    _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

    def __init__(self, settings, trees, myopts, myparams, spinner,
        frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
        # NOTE(review): the default BacktrackParameter() is created once at
        # function-definition time and shared by every call that omits the
        # argument -- safe only if it is never mutated; verify.
        if frozen_config is None:
            # The frozen (run-invariant) config may be shared by a series
            # of backtracking runs; build it only when not supplied.
            frozen_config = _frozen_depgraph_config(settings, trees,
        self._frozen_config = frozen_config
        self._dynamic_config = _dynamic_depgraph_config(self, myparams,
            allow_backtracking, backtrack_parameters)

        # Package-selection strategy hooks: pick the highest available
        # visible candidates by default.
        self._select_atoms = self._select_atoms_highest_available
        self._select_package = self._select_pkg_highest_available
        Load installed package metadata if appropriate. This used to be called
        from the constructor, but that wasn't very nice since this procedure
        is slow and it generates spinner output. So, now it's called on-demand
        by various methods when necessary.

        # Idempotent: a previous load short-circuits via this flag.
        if self._dynamic_config._vdb_loaded:

        for myroot in self._frozen_config.trees:

            # Preloading is pointless when dependencies are not being
            # computed at all.
            preload_installed_pkgs = \
                "--nodeps" not in self._frozen_config.myopts and \
                "--buildpkgonly" not in self._frozen_config.myopts

            fake_vartree = self._frozen_config.trees[myroot]["vartree"]
            if not fake_vartree.dbapi:
                # This needs to be called for the first depgraph, but not for
                # backtracking depgraphs that share the same frozen_config.

                # FakeVartree.sync() populates virtuals, and we want
                # self.pkgsettings to have them populated too.
                self._frozen_config.pkgsettings[myroot] = \
                    portage.config(clone=fake_vartree.settings)

            if preload_installed_pkgs:
                vardb = fake_vartree.dbapi
                fakedb = self._dynamic_config._graph_trees[
                    myroot]["vartree"].dbapi

                    self._spinner_update()
                    # This triggers metadata updates via FakeVartree.
                    vardb.aux_get(pkg.cpv, [])
                    fakedb.cpv_inject(pkg)

                # Now that the vardb state is cached in our FakeVartree,
                # we won't be needing the real vartree cache for awhile.
                # To make some room on the heap, clear the vardbapi
                self._frozen_config._trees_orig[myroot
                    ]["vartree"].dbapi._clear_cache()

        self._dynamic_config._vdb_loaded = True
346 def _spinner_update(self):
347 if self._frozen_config.spinner:
348 self._frozen_config.spinner.update()
    def _show_missed_update(self):
        """
        Report updates that were masked at runtime by the backtracker,
        grouped by mask reason and delegated to the per-reason display
        helpers below.
        """
        # Suppress entirely in --quiet mode (unless --debug is on too).
        if '--quiet' in self._frozen_config.myopts and \
            '--debug' not in self._frozen_config.myopts:

        # In order to minimize noise, show only the highest
        # missed update from each SLOT.
        for pkg, mask_reasons in \
            self._dynamic_config._runtime_pkg_mask.items():
            # Exclude installed here since we only
            # want to show available updates.
            k = (pkg.root, pkg.slot_atom)
            if k in missed_updates:
                other_pkg, mask_type, parent_atoms = missed_updates[k]
            for mask_type, parent_atoms in mask_reasons.items():
                missed_updates[k] = (pkg, mask_type, parent_atoms)

        if not missed_updates:

        # Bucket the surviving (root, slot) entries by mask reason so each
        # reason gets its own report format.
        missed_update_types = {}
        for pkg, mask_type, parent_atoms in missed_updates.values():
            missed_update_types.setdefault(mask_type,
                []).append((pkg, parent_atoms))

        self._show_missed_update_slot_conflicts(
            missed_update_types.get("slot conflict"))

        self._show_missed_update_unsatisfied_dep(
            missed_update_types.get("missing dependency"))
    def _show_missed_update_unsatisfied_dep(self, missed_updates):
        """
        Display updates skipped because of unsatisfied dependencies.
        Updates masked by backtracking are collected separately and shown
        in abbreviated form to avoid flooding the terminal (bug #285832).
        """
        if not missed_updates:

        backtrack_masked = []

        for pkg, parent_atoms in missed_updates:

                for parent, root, atom in parent_atoms:
                    self._show_unsatisfied_dep(root, atom, myparent=parent,
                        check_backtrack=True)
            except self._backtrack_mask:
                # This is displayed below in abbreviated form.
                backtrack_masked.append((pkg, parent_atoms))

            writemsg("\n!!! The following update has been skipped " + \
                "due to unsatisfied dependencies:\n\n", noiselevel=-1)

            writemsg(str(pkg.slot_atom), noiselevel=-1)
            writemsg(" for %s" % (pkg.root,), noiselevel=-1)
            writemsg("\n", noiselevel=-1)

            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent)
                writemsg("\n", noiselevel=-1)

            # These are shown in abbreviated form, in order to avoid terminal
            # flooding from mask messages as reported in bug #285832.
            writemsg("\n!!! The following update(s) have been skipped " + \
                "due to unsatisfied dependencies\n" + \
                "!!! triggered by backtracking:\n\n", noiselevel=-1)
            for pkg, parent_atoms in backtrack_masked:
                writemsg(str(pkg.slot_atom), noiselevel=-1)
                writemsg(" for %s" % (pkg.root,), noiselevel=-1)
                writemsg("\n", noiselevel=-1)
    def _show_missed_update_slot_conflicts(self, missed_updates):
        """
        Display updates skipped due to slot conflicts, listing for each
        skipped package the parent/atom pairs that conflicted with it.
        """
        if not missed_updates:

        msg.append("\n!!! One or more updates have been skipped due to " + \
            "a dependency conflict:\n\n")

        for pkg, parent_atoms in missed_updates:
            msg.append(str(pkg.slot_atom))
            msg.append(" for %s" % (pkg.root,))

            for parent, atom in parent_atoms:
                msg.append(" conflicts with\n")

                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                else:
                    # Display the specific atom from SetArg or
                    # Package types.
                    msg.append("%s required by %s" % (atom, parent))

        writemsg("".join(msg), noiselevel=-1)
    def _show_slot_collision_notice(self):
        """Show an informational message advising the user to mask one of the
        the packages. In some cases it may be possible to resolve this
        automatically, but support for backtracking (removal nodes that have
        already been selected) will be required in order to handle all possible

        if not self._dynamic_config._slot_collision_info:

        self._show_merge_list()

        # Delegate conflict formatting and explanation to the dedicated
        # slot_conflict_handler.
        self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
        handler = self._dynamic_config._slot_conflict_handler

        conflict = handler.get_conflict()
        writemsg(conflict, noiselevel=-1)

        explanation = handler.get_explanation()
        writemsg(explanation, noiselevel=-1)

        if "--quiet" in self._frozen_config.myopts:

        msg.append("It may be possible to solve this problem ")
        msg.append("by using package.mask to prevent one of ")
        msg.append("those packages from being selected. ")
        msg.append("However, it is also possible that conflicting ")
        msg.append("dependencies exist such that they are impossible to ")
        msg.append("satisfy simultaneously. If such a conflict exists in ")
        msg.append("the dependencies of two different packages, then those ")
        msg.append("packages can not be installed simultaneously.")
        backtrack_opt = self._frozen_config.myopts.get('--backtrack')
        # Only suggest raising --backtrack when backtracking is currently
        # disabled and the user hasn't already set a large value.
        if not self._dynamic_config._allow_backtracking and \
            (backtrack_opt is None or \
            (backtrack_opt > 0 and backtrack_opt < 30)):
            msg.append(" You may want to try a larger value of the ")
            msg.append("--backtrack option, such as --backtrack=30, ")
            msg.append("in order to see if that will solve this conflict ")
            msg.append("automatically.")

        for line in textwrap.wrap(''.join(msg), 70):
            writemsg(line + '\n', noiselevel=-1)
        writemsg('\n', noiselevel=-1)

        msg.append("For more information, see MASKED PACKAGES ")
        msg.append("section in the emerge man page or refer ")
        msg.append("to the Gentoo Handbook.")
        for line in textwrap.wrap(''.join(msg), 70):
            writemsg(line + '\n', noiselevel=-1)
        writemsg('\n', noiselevel=-1)
    def _process_slot_conflicts(self):
        """
        Process slot conflict data to identify specific atoms which
        lead to conflict. These atoms only match a subset of the
        packages that have been pulled into a given slot.
        """
        for (slot_atom, root), slot_nodes \
            in self._dynamic_config._slot_collision_info.items():

            # Union of parent atoms across every package in this slot.
            all_parent_atoms = set()
            for pkg in slot_nodes:
                parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
                all_parent_atoms.update(parent_atoms)

            for pkg in slot_nodes:
                parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
                if parent_atoms is None:
                    self._dynamic_config._parent_atoms[pkg] = parent_atoms
                for parent_atom in all_parent_atoms:
                    if parent_atom in parent_atoms:
                    # Use package set for matching since it will match via
                    # PROVIDE when necessary, while match_from_list does not.
                    parent, atom = parent_atom
                    atom_set = InternalPackageSet(
                        initial_atoms=(atom,))
                    if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
                        parent_atoms.add(parent_atom)
                        # This atom matches only some of the slot's packages,
                        # so it is part of the conflict.
                        self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
    def _reinstall_for_flags(self, forced_flags,
        orig_use, orig_iuse, cur_use, cur_iuse):
        """Return a set of flags that trigger reinstallation, or None if there
        are no such flags."""
        if "--newuse" in self._frozen_config.myopts or \
            "--binpkg-respect-use" in self._frozen_config.myopts:
            # Any IUSE membership change (minus forced flags) counts ...
            flags = set(orig_iuse.symmetric_difference(
                cur_iuse).difference(forced_flags))
            # ... plus any change in the enabled subset of flags that are
            # present in the respective IUSE sets.
            flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use)))
        elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
            # --reinstall=changed-use: only enabled-state changes of flags
            # present in both IUSE sets matter.
            flags = orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use))
    def _create_graph(self, allow_unsatisfied=False):
        """
        Drain the dependency stacks, expanding Package nodes into their
        dependencies and resolving plain Dependency instances, until the
        graph is complete or a dependency fails to resolve.
        """
        dep_stack = self._dynamic_config._dep_stack
        dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
        while dep_stack or dep_disjunctive_stack:
            self._spinner_update()
                dep = dep_stack.pop()
                if isinstance(dep, Package):
                    # A Package node expands into its own dependencies.
                    if not self._add_pkg_deps(dep,
                        allow_unsatisfied=allow_unsatisfied):
                # A plain Dependency resolves into a concrete package.
                if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
            if dep_disjunctive_stack:
                # Deferred disjunctive (any-of) choices are resolved after
                # the regular stack is empty.
                if not self._pop_disjunction(allow_unsatisfied):
    def _expand_set_args(self, input_args, add_to_digraph=False):
        """
        Iterate over a list of DependencyArg instances and yield all
        instances given in the input together with additional SetArg
        instances that are generated from nested sets.
        @param input_args: An iterable of DependencyArg instances
        @type input_args: Iterable
        @param add_to_digraph: If True then add SetArg instances
            to the digraph, in order to record parent -> child
            relationships from nested sets
        @type add_to_digraph: Boolean
        @returns: All args given in the input together with additional
            SetArg instances that are generated from nested sets
        """

        # Guards against revisiting a set reachable through multiple parents.
        traversed_set_args = set()

        for arg in input_args:
            if not isinstance(arg, SetArg):

            root_config = arg.root_config
            depgraph_sets = self._dynamic_config.sets[root_config.root]

                arg = arg_stack.pop()
                if arg in traversed_set_args:
                traversed_set_args.add(arg)

                # Traverse nested sets and add them to the stack
                # if they're not already in the graph. Also, graph
                # edges between parent and nested sets.
                for token in arg.pset.getNonAtoms():
                    if not token.startswith(SETPREFIX):
                    s = token[len(SETPREFIX):]
                    # Prefer a set already registered for this graph run,
                    # falling back to the root's configured sets.
                    nested_set = depgraph_sets.sets.get(s)
                    if nested_set is None:
                        nested_set = root_config.sets.get(s)
                    if nested_set is not None:
                        nested_arg = SetArg(arg=token, pset=nested_set,
                            root_config=root_config)
                        arg_stack.append(nested_arg)
                            self._dynamic_config.digraph.add(nested_arg, arg,
                                priority=BlockerDepPriority.instance)
                            depgraph_sets.sets[nested_arg.name] = nested_arg.pset
    def _add_dep(self, dep, allow_unsatisfied=False):
        """
        Resolve a single Dependency instance: blockers become Blocker
        nodes, everything else is matched to a package via
        self._select_package and handed to self._add_pkg. Callers appear
        to treat a falsy return as failure (see _create_graph).
        """
        debug = "--debug" in self._frozen_config.myopts
        buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
        nodeps = "--nodeps" in self._frozen_config.myopts
        empty = "empty" in self._dynamic_config.myparams
        deep = self._dynamic_config.myparams.get("deep", 0)
        # "deep" may be the True object (unbounded) or an integer depth cap.
        recurse = empty or deep is True or dep.depth <= deep
            if not buildpkgonly and \
                dep.parent not in self._dynamic_config._slot_collision_nodes:
                if dep.parent.onlydeps:
                    # It's safe to ignore blockers if the
                    # parent is an --onlydeps node.
                # The blocker applies to the root where
                # the parent is or will be installed.
                blocker = Blocker(atom=dep.atom,
                    eapi=dep.parent.metadata["EAPI"],
                    priority=dep.priority, root=dep.parent.root)
                self._dynamic_config._blocker_parents.add(blocker, dep.parent)

        if dep.child is None:
            dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
                onlydeps=dep.onlydeps)
        else:
            # The caller has selected a specific package
            # via self._minimize_packages().
            existing_node = self._dynamic_config._slot_pkg_map[
                dep.root].get(dep_pkg.slot_atom)
            if existing_node is not dep_pkg:

            # No package satisfied the atom.
            if dep.priority.optional:
                # This could be an unecessary build-time dep
                # pulled in by --with-bdeps=y.
            if allow_unsatisfied:
                self._dynamic_config._unsatisfied_deps.append(dep)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                ((dep.root, dep.atom), {"myparent":dep.parent}))

            # The parent node should not already be in
            # runtime_pkg_mask, since that would trigger an
            # infinite backtracking loop.
            if self._dynamic_config._allow_backtracking:
                if dep.parent in self._dynamic_config._runtime_pkg_mask:
                    if "--debug" in self._frozen_config.myopts:
                            "!!! backtracking loop detected: %s %s\n" % \
                            self._dynamic_config._runtime_pkg_mask[
                            dep.parent]), noiselevel=-1)
                elif not self.need_restart():
                    # Do not backtrack if only USE have to be changed in
                    # order to satisfy the dependency.
                    dep_pkg, existing_node = \
                        self._select_package(dep.root, dep.atom.without_use,
                            onlydeps=dep.onlydeps)
                        # Record the failed dep so the backtracker can mask
                        # the parent on the next run.
                        self._dynamic_config._backtrack_infos["missing dependency"] = dep
                        self._dynamic_config._need_restart = True
                        if "--debug" in self._frozen_config.myopts:
                            msg.append("backtracking due to unsatisfied dep:")
                            msg.append(" parent: %s" % dep.parent)
                            msg.append(" priority: %s" % dep.priority)
                            msg.append(" root: %s" % dep.root)
                            msg.append(" atom: %s" % dep.atom)
                            writemsg_level("".join("%s\n" % l for l in msg),
                                noiselevel=-1, level=logging.DEBUG)

        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if dep.priority.satisfied and \
            dep.priority.satisfied.visible and \
            not dep_pkg.installed and \
            not (existing_node or recurse):
            if dep.root == self._frozen_config.target_root:
                    myarg = next(self._iter_atoms_for_pkg(dep_pkg))
                except StopIteration:
                except portage.exception.InvalidDependString:
                    if not dep_pkg.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
                self._dynamic_config._ignored_deps.append(dep)

        if not self._add_pkg(dep_pkg, dep):
    def _add_pkg(self, pkg, dep):
        # Adds pkg to the digraph (with edges from dep.parent and any
        # matching argument atoms), detects slot conflicts against any
        # existing node in the same slot, and schedules pkg's own
        # dependencies for traversal.
        myparent = dep.parent
        priority = dep.priority
            priority = DepPriority()
        """
        Fills the digraph with nodes comprised of packages to merge.
        mybigkey is the package spec of the package to merge.
        myparent is the package depending on mybigkey ( or None )
        addme = Should we add this package to the digraph or are we just looking at it's deps?
            Think --onlydeps, we need to ignore packages in that case.
        """
        #IUSE-aware emerge -> USE DEP aware depgraph
        #"no downgrade" emerge

        # Ensure that the dependencies of the same package
        # are never processed more than once.
        previously_added = pkg in self._dynamic_config.digraph

        # select the correct /var database that we'll be checking against
        vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]

            arg_atoms = list(self._iter_atoms_for_pkg(pkg))
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected

        if not pkg.installed and \
            "empty" not in self._dynamic_config.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
            # Another package already occupies this slot; decide whether
            # it can be reused or whether this is a conflict.
            existing_node_matches = pkg.cpv == existing_node.cpv
            if existing_node_matches and \
                pkg != existing_node and \
                dep.atom is not None:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                atom_set = InternalPackageSet(initial_atoms=[dep.atom])
                if not atom_set.findAtomForPackage(existing_node, \
                    modified_use=self._pkg_use_enabled(existing_node)):
                    existing_node_matches = False
            if existing_node_matches:
                # The existing node can be reused.
                for parent_atom in arg_atoms:
                    parent, atom = parent_atom
                    self._dynamic_config.digraph.add(existing_node, parent,
                    self._add_parent_atom(existing_node, parent_atom)
                # If a direct circular dependency is not an unsatisfied
                # buildtime dependency then drop it here since otherwise
                # it can skew the merge order calculation in an unwanted
                if existing_node != myparent or \
                    (priority.buildtime and not priority.satisfied):
                    self._dynamic_config.digraph.addnode(existing_node, myparent,
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(existing_node,
                            (dep.parent, dep.atom))

                # A slot conflict has occurred.
                # The existing node should not already be in
                # runtime_pkg_mask, since that would trigger an
                # infinite backtracking loop.
                if self._dynamic_config._allow_backtracking and \
                    self._dynamic_config._runtime_pkg_mask:
                    if "--debug" in self._frozen_config.myopts:
                            "!!! backtracking loop detected: %s %s\n" % \
                            self._dynamic_config._runtime_pkg_mask[
                            existing_node]), noiselevel=-1)
                elif self._dynamic_config._allow_backtracking and \
                    not self._accept_blocker_conflicts() and \
                    not self.need_restart():
                    self._add_slot_conflict(pkg)
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(pkg, (dep.parent, dep.atom))
                    for parent_atom in arg_atoms:
                        parent, atom = parent_atom
                        self._add_parent_atom(pkg, parent_atom)
                    self._process_slot_conflicts()

                    # Collect the conflicting parent atoms of both slot
                    # occupants so the backtracker can mask one of them.
                    for node, other_node in (existing_node, pkg), (pkg, existing_node):
                        self._dynamic_config._parent_atoms.get(node, set())
                        conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
                            parent_atoms = conflict_atoms
                        all_parents.update(parent_atoms)
                        if node < other_node:
                            backtrack_data.append((node, parent_atoms))

                    self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
                    self._dynamic_config._need_restart = True
                    if "--debug" in self._frozen_config.myopts:
                        msg.append("backtracking due to slot conflict:")
                        msg.append(" first package: %s" % existing_node)
                        msg.append(" second package: %s" % pkg)
                        msg.append(" slot: %s" % pkg.slot_atom)
                        msg.append(" parents: %s" % \
                            [(str(parent), atom) \
                            for parent, atom in all_parents])
                        writemsg_level("".join("%s\n" % l for l in msg),
                            noiselevel=-1, level=logging.DEBUG)

                # A slot collision has occurred. Sometimes this coincides
                # with unresolvable blockers, so the slot collision will be
                # shown later if there are no unresolvable blockers.
                self._add_slot_conflict(pkg)
                slot_collision = True

            # Now add this node to the graph so that self.display()
            # can show use flags and --tree portage.output. This node is
            # only being partially added to the graph. It must not be
            # allowed to interfere with the other nodes that have been
            # added. Do not overwrite data for existing nodes in
            # self._dynamic_config.mydbapi since that data will be used for blocker
            # Even though the graph is now invalid, continue to process
            # dependencies so that things like --fetchonly can still
            # function despite collisions.

        elif not previously_added:
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            # Package selection caches are stale now that the graph changed.
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
                pkgsettings.setinst(pkg.cpv, pkg.metadata)
                # For consistency, also update the global virtuals.
                settings = self._frozen_config.roots[pkg.root].settings
                settings.setinst(pkg.cpv, pkg.metadata)
            except portage.exception.InvalidDependString as e:
                if not pkg.installed:
                    # should have been masked before it was selected

            self._dynamic_config._set_nodes.add(pkg)

        # Do this even when addme is False (--onlydeps) so that the
        # parent/child relationship is always known in case
        # self._show_slot_collision_notice() needs to be called later.
        self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))

            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(pkg, parent, priority=priority)
                self._add_parent_atom(pkg, parent_atom)

        """ This section determines whether we go deeper into dependencies or not.
            We want to go deeper on a few occasions:
            Installing package A, we need to make sure package A's deps are met.
            emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
            If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
        """
        deep = self._dynamic_config.myparams.get("deep", 0)
        empty = "empty" in self._dynamic_config.myparams
        recurse = empty or deep is True or depth + 1 <= deep
        dep_stack = self._dynamic_config._dep_stack
        if "recurse" not in self._dynamic_config.myparams:
        elif pkg.installed and not recurse:
            # Deps of installed packages outside the recursion depth are
            # recorded but not traversed.
            dep_stack = self._dynamic_config._ignored_deps

        self._spinner_update()

        if not previously_added:
            dep_stack.append(pkg)
990 def _check_masks(self, pkg):
992 slot_key = (pkg.root, pkg.slot_atom)
994 # Check for upgrades in the same slot that are
995 # masked due to a LICENSE change in a newer
996 # version that is not masked for any other reason.
997 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
998 if other_pkg is not None and pkg < other_pkg:
999 self._dynamic_config._masked_license_updates.add(other_pkg)
1001 def _add_parent_atom(self, pkg, parent_atom):
1002 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1003 if parent_atoms is None:
1004 parent_atoms = set()
1005 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1006 parent_atoms.add(parent_atom)
1008 def _add_slot_conflict(self, pkg):
# Record pkg as a slot-collision participant: add it to the collision
# node set and group it, keyed by (slot_atom, root), together with the
# package previously selected for the same slot.
1009 self._dynamic_config._slot_collision_nodes.add(pkg)
1010 slot_key = (pkg.slot_atom, pkg.root)
1011 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1012 if slot_nodes is None:
# NOTE(review): the branch body that initializes slot_nodes for a
# first-time key (presumably "slot_nodes = set()", original line 1013)
# is elided from this excerpt — confirm against the upstream source.
1014 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1015 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
1018 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
# Expand pkg's DEPEND/RDEPEND/PDEPEND strings and feed the resulting
# dependencies into the graph via _add_pkg_dep_string().
# NOTE(review): this listing elides some original lines (numbering
# gaps); treat it as a partial excerpt of the upstream method.
1020 mytype = pkg.type_name
1023 metadata = pkg.metadata
1024 myuse = self._pkg_use_enabled(pkg)
1026 depth = pkg.depth + 1
1027 removal_action = "remove" in self._dynamic_config.myparams
1030 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1032 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are not needed since
# nothing will be merged to the live filesystem.
1034 if not pkg.built and \
1035 "--buildpkgonly" in self._frozen_config.myopts and \
1036 "deep" not in self._dynamic_config.myparams and \
1037 "empty" not in self._dynamic_config.myparams:
1038 edepend["RDEPEND"] = ""
1039 edepend["PDEPEND"] = ""
1041 if pkg.built and not removal_action:
1042 if self._frozen_config.myopts.get("--with-bdeps", "n") == "y":
1043 # Pull in build time deps as requested, but marked them as
1044 # "optional" since they are not strictly required. This allows
1045 # more freedom in the merge order calculation for solving
1046 # circular dependencies. Don't convert to PDEPEND since that
1047 # could make --with-bdeps=y less effective if it is used to
1048 # adjust merge order to prevent built_with_use() calls from
# By default (--with-bdeps=n for install actions),
1052 # built packages do not have build time dependencies.
1053 edepend["DEPEND"] = ""
1055 if removal_action and self._frozen_config.myopts.get("--with-bdeps", "y") == "n":
1056 edepend["DEPEND"] = ""
1059 depend_root = myroot
# --root-deps redirects build-time deps to the target root (or drops
# them entirely with --root-deps=rdeps).
1062 root_deps = self._frozen_config.myopts.get("--root-deps")
1063 if root_deps is not None:
1064 if root_deps is True:
1065 depend_root = myroot
1066 elif root_deps == "rdeps":
1067 edepend["DEPEND"] = ""
# Build (root, dep string, priority, ignore_blockers) tuples for each
# dependency class, with priorities distinguishing build/run/post deps.
1070 (depend_root, edepend["DEPEND"],
1071 self._priority(buildtime=(not pkg.built),
1072 optional=pkg.built),
1074 (myroot, edepend["RDEPEND"],
1075 self._priority(runtime=True),
1077 (myroot, edepend["PDEPEND"],
1078 self._priority(runtime_post=True),
1082 debug = "--debug" in self._frozen_config.myopts
# strict: installed packages get lenient dep-string handling below.
1083 strict = mytype != "installed"
1085 for dep_root, dep_string, dep_priority, ignore_blockers in deps:
1089 writemsg_level("\nParent: %s\n" % (pkg,),
1090 noiselevel=-1, level=logging.DEBUG)
1091 writemsg_level("Depstring: %s\n" % (dep_string,),
1092 noiselevel=-1, level=logging.DEBUG)
1093 writemsg_level("Priority: %s\n" % (dep_priority,),
1094 noiselevel=-1, level=logging.DEBUG)
# Evaluate USE conditionals, validating flags against IUSE first.
1097 dep_string = portage.dep.use_reduce(dep_string,
1098 uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
1099 except portage.exception.InvalidDependString as e:
1100 if not pkg.installed:
1101 # should have been masked before it was selected
# Try again, but omit the is_valid_flag argument, since
# invalid USE conditionals are a common problem and it's
# practical to ignore this issue for installed packages.
1109 dep_string = portage.dep.use_reduce(dep_string,
1110 uselist=self._pkg_use_enabled(pkg))
1111 except portage.exception.InvalidDependString as e:
1112 self._dynamic_config._masked_installed.add(pkg)
# Defer disjunctive (|| / virtual) deps; only plain deps pass through.
1117 dep_string = list(self._queue_disjunctive_deps(
1118 pkg, dep_root, dep_priority, dep_string))
1119 except portage.exception.InvalidDependString as e:
1121 self._dynamic_config._masked_installed.add(pkg)
1125 # should have been masked before it was selected
1131 dep_string = portage.dep.paren_enclose(dep_string)
1133 if not self._add_pkg_dep_string(
1134 pkg, dep_root, dep_priority, dep_string,
1135 allow_unsatisfied, ignore_blockers=ignore_blockers):
1138 except portage.exception.AmbiguousPackageName as e:
1140 portage.writemsg("\n\n!!! An atom in the dependencies " + \
1141 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1143 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
1144 portage.writemsg("\n", noiselevel=-1)
1145 if mytype == "binary":
1147 "!!! This binary package cannot be installed: '%s'\n" % \
1148 mykey, noiselevel=-1)
1149 elif mytype == "ebuild":
1150 portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
1151 myebuild, mylocation = portdb.findname2(mykey, myrepo = pkg.repo)
1152 portage.writemsg("!!! This ebuild cannot be installed: " + \
1153 "'%s'\n" % myebuild, noiselevel=-1)
1154 portage.writemsg("!!! Please notify the package maintainer " + \
1155 "that atoms must be fully-qualified.\n", noiselevel=-1)
1157 self._dynamic_config._traversed_pkg_deps.add(pkg)
1160 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1161 allow_unsatisfied, ignore_blockers=False):
# Resolve one dependency string for pkg into concrete atoms/packages
# (via _select_atoms) and add each as a Dependency edge in the graph.
# NOTE(review): this listing elides some original lines (numbering
# gaps); treat it as a partial excerpt of the upstream method.
1162 depth = pkg.depth + 1
1163 debug = "--debug" in self._frozen_config.myopts
1164 strict = pkg.type_name != "installed"
1167 writemsg_level("\nParent: %s\n" % (pkg,),
1168 noiselevel=-1, level=logging.DEBUG)
1169 writemsg_level("Depstring: %s\n" % (dep_string,),
1170 noiselevel=-1, level=logging.DEBUG)
1171 writemsg_level("Priority: %s\n" % (dep_priority,),
1172 noiselevel=-1, level=logging.DEBUG)
1175 selected_atoms = self._select_atoms(dep_root,
1176 dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
1177 strict=strict, priority=dep_priority)
1178 except portage.exception.InvalidDependString as e:
1180 self._dynamic_config._masked_installed.add(pkg)
1183 # should have been masked before it was selected
1187 writemsg_level("Candidates: %s\n" % \
1188 ([str(x) for x in selected_atoms[pkg]],),
1189 noiselevel=-1, level=logging.DEBUG)
1191 root_config = self._frozen_config.roots[dep_root]
1192 vardb = root_config.trees["vartree"].dbapi
# Direct (non-virtual) deps of pkg.
1194 for atom, child in self._minimize_children(
1195 pkg, dep_priority, root_config, selected_atoms[pkg]):
1197 if ignore_blockers and atom.blocker:
1198 # For --with-bdeps, ignore build-time only blockers
1199 # that originate from built packages.
1202 mypriority = dep_priority.copy()
1203 if not atom.blocker:
# Mark the priority "satisfied" by an installed match, preferring
# a visible one; otherwise fall back to the highest installed match.
1204 inst_pkgs = vardb.match_pkgs(atom)
1206 for inst_pkg in inst_pkgs:
1207 if self._pkg_visibility_check(inst_pkg):
1209 mypriority.satisfied = inst_pkg
1211 if not mypriority.satisfied:
1212 # none visible, so use highest
1213 mypriority.satisfied = inst_pkgs[0]
1215 if not self._add_dep(Dependency(atom=atom,
1216 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1217 priority=mypriority, root=dep_root),
1218 allow_unsatisfied=allow_unsatisfied):
1221 selected_atoms.pop(pkg)
1223 # Add selected indirect virtual deps to the graph. This
1224 # takes advantage of circular dependency avoidance that's done
1225 # by dep_zapdeps. We preserve actual parent/child relationships
1226 # here in order to avoid distorting the dependency graph like
1227 # <=portage-2.1.6.x did.
1228 for virt_pkg, atoms in selected_atoms.items():
1231 writemsg_level("Candidates: %s: %s\n" % \
1232 (virt_pkg.cpv, [str(x) for x in atoms]),
1233 noiselevel=-1, level=logging.DEBUG)
1235 # Just assume depth + 1 here for now, though it's not entirely
1236 # accurate since multiple levels of indirect virtual deps may
1237 # have been traversed. The _add_pkg call will reset the depth to
1238 # 0 if this package happens to match an argument.
1239 if not self._add_pkg(virt_pkg,
1240 Dependency(atom=Atom('=' + virt_pkg.cpv),
1241 depth=(depth + 1), parent=pkg, priority=dep_priority.copy(),
1245 for atom, child in self._minimize_children(
1246 pkg, self._priority(runtime=True), root_config, atoms):
1247 # This is a GLEP 37 virtual, so its deps are all runtime.
1248 mypriority = self._priority(runtime=True)
1249 if not atom.blocker:
1250 inst_pkgs = vardb.match_pkgs(atom)
1252 for inst_pkg in inst_pkgs:
1253 if self._pkg_visibility_check(inst_pkg):
1255 mypriority.satisfied = inst_pkg
1257 if not mypriority.satisfied:
1258 # none visible, so use highest
1259 mypriority.satisfied = inst_pkgs[0]
1261 if not self._add_dep(Dependency(atom=atom,
1262 blocker=atom.blocker, child=child, depth=virt_pkg.depth,
1263 parent=virt_pkg, priority=mypriority, root=dep_root),
1264 allow_unsatisfied=allow_unsatisfied):
1268 writemsg_level("Exiting... %s\n" % (pkg,),
1269 noiselevel=-1, level=logging.DEBUG)
1273 def _minimize_children(self, parent, priority, root_config, atoms):
# Generator yielding (atom, selected package) pairs.
# NOTE(review): this listing elides some original lines (numbering
# gaps, including part of the docstring); partial excerpt.
1275 Selects packages to satisfy the given atoms, and minimizes the
1276 number of selected packages. This serves to identify and eliminate
1277 redundant package selections when multiple atoms happen to specify
1287 dep_pkg, existing_node = self._select_package(
1288 root_config.root, atom)
1292 atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
1294 if len(atom_pkg_map) < 2:
1295 for item in atom_pkg_map.items():
# Group atoms per selected package and packages per category/package.
1301 for atom, pkg in atom_pkg_map.items():
1302 pkg_atom_map.setdefault(pkg, set()).add(atom)
1303 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
1305 for cp, pkgs in cp_pkg_map.items():
1308 for atom in pkg_atom_map[pkg]:
1312 # Use a digraph to identify and eliminate any
1313 # redundant package selections.
1314 atom_pkg_graph = digraph()
1317 for atom in pkg_atom_map[pkg1]:
1319 atom_pkg_graph.add(pkg1, atom)
1320 atom_set = InternalPackageSet(initial_atoms=(atom,))
1324 if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
1325 atom_pkg_graph.add(pkg2, atom)
# A package is redundant if every atom pointing at it is also
# satisfied by some other selected package.
1328 eliminate_pkg = True
1329 for atom in atom_pkg_graph.parent_nodes(pkg):
1330 if len(atom_pkg_graph.child_nodes(atom)) < 2:
1331 eliminate_pkg = False
1334 atom_pkg_graph.remove(pkg)
1336 # Yield < and <= atoms first, since those are more likely to
1337 # cause slot conflicts, and we want those atoms to be displayed
1338 # in the resulting slot conflict message (see bug #291142).
1341 for atom in cp_atoms:
1342 if atom.operator in ('<', '<='):
1343 less_than.append(atom)
1345 not_less_than.append(atom)
1347 for atom in chain(less_than, not_less_than):
1348 child_pkgs = atom_pkg_graph.child_nodes(atom)
1349 yield (atom, child_pkgs[0])
1351 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
# NOTE(review): this listing elides some original lines (numbering
# gaps, including docstring delimiters); partial excerpt.
1353 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
1354 Yields non-disjunctive deps. Raises InvalidDependString when
1358 while i < len(dep_struct):
# Nested lists are processed recursively; "||" groups are queued
# together with the list element that follows them.
1360 if isinstance(x, list):
1361 for y in self._queue_disjunctive_deps(
1362 pkg, dep_root, dep_priority, x):
1365 self._queue_disjunction(pkg, dep_root, dep_priority,
1366 [ x, dep_struct[ i + 1 ] ] )
1370 x = portage.dep.Atom(x)
1371 except portage.exception.InvalidAtom:
# Installed packages tolerate invalid atoms; everything else raises.
1372 if not pkg.installed:
1373 raise portage.exception.InvalidDependString(
1374 "invalid atom: '%s'" % x)
1376 # Note: Eventually this will check for PROPERTIES=virtual
1377 # or whatever other metadata gets implemented for this
1379 if x.cp.startswith('virtual/'):
1380 self._queue_disjunction( pkg, dep_root,
1381 dep_priority, [ str(x) ] )
1386 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1387 self._dynamic_config._dep_disjunctive_stack.append(
1388 (pkg, dep_root, dep_priority, dep_struct))
1390 def _pop_disjunction(self, allow_unsatisfied):
# NOTE(review): docstring delimiters are elided from this excerpt
# (numbering gaps); partial listing.
1392 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
1393 populate self._dynamic_config._dep_stack.
1395 pkg, dep_root, dep_priority, dep_struct = \
1396 self._dynamic_config._dep_disjunctive_stack.pop()
# Re-serialize the structure and process it like any other dep string.
1397 dep_string = portage.dep.paren_enclose(dep_struct)
1398 if not self._add_pkg_dep_string(
1399 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
	"""Construct a dependency priority of the kind appropriate for the
	current operation: UnmergeDepPriority when a "remove" action is in
	progress, DepPriority otherwise. Keyword arguments are forwarded
	unchanged to the chosen constructor.
	"""
	if "remove" in self._dynamic_config.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		# NOTE(review): this "else:" restores control flow that was
		# dropped from the excerpt (original line 1406); without it,
		# both assignments would run and the first would be dead.
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
1410 def _dep_expand(self, root_config, atom_without_category):
# Expand a category-less atom by scanning every configured db for
# categories containing a matching package name.
# NOTE(review): this listing elides some original lines (numbering
# gaps, including the return statement); partial excerpt.
1412 @param root_config: a root config instance
1413 @type root_config: RootConfig
1414 @param atom_without_category: an atom without a category component
1415 @type atom_without_category: String
1417 @returns: a list of atoms containing categories (possibly empty)
# Parse the package-name part using a throwaway "null" category.
1419 null_cp = portage.dep_getkey(insert_category_into_atom(
1420 atom_without_category, "null"))
1421 cat, atom_pn = portage.catsplit(null_cp)
1423 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1425 for db, pkg_type, built, installed, db_keys in dbs:
1426 for cat in db.categories:
1427 if db.cp_list("%s/%s" % (cat, atom_pn)):
1431 for cat in categories:
1432 deps.append(Atom(insert_category_into_atom(
1433 atom_without_category, cat), allow_repo=True))
1436 def _have_new_virt(self, root, atom_cp):
# Scan the filtered dbs of the given root for any package matching
# atom_cp (used by callers to detect new-style virtual providers).
# NOTE(review): the return statements are elided from this excerpt
# (numbering gaps); partial listing.
1438 for db, pkg_type, built, installed, db_keys in \
1439 self._dynamic_config._filtered_trees[root]["dbs"]:
1440 if db.cp_list(atom_cp):
1445 def _iter_atoms_for_pkg(self, pkg):
# Generator over argument atoms that apply to pkg, skipping atoms
# superseded by new-style virtuals or higher-slot visible packages.
# NOTE(review): this listing elides some original lines (numbering
# gaps); partial excerpt.
1446 depgraph_sets = self._dynamic_config.sets[pkg.root]
1447 atom_arg_map = depgraph_sets.atom_arg_map
1448 root_config = self._frozen_config.roots[pkg.root]
1449 for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
# Old-style virtual atom with a new-style provider available:
# the atom does not belong to this pkg.
1450 if atom.cp != pkg.cp and \
1451 self._have_new_virt(pkg.root, atom.cp):
1454 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
1455 visible_pkgs.reverse() # descending order
1457 for visible_pkg in visible_pkgs:
1458 if visible_pkg.cp != atom.cp:
1460 if pkg >= visible_pkg:
1461 # This is descending order, and we're not
1462 # interested in any versions <= pkg given.
1464 if pkg.slot_atom != visible_pkg.slot_atom:
1465 higher_slot = visible_pkg
1467 if higher_slot is not None:
1469 for arg in atom_arg_map[(atom, pkg.root)]:
1470 if isinstance(arg, PackageArg) and \
1475 def select_files(self, myfiles):
1476 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1477 self._dynamic_config._initial_arg_list and call self._resolve to create the
1478 appropriate depgraph and return a favorite list."""
# NOTE(review): this listing elides many original lines (numbering
# gaps); treat it as a partial excerpt of the upstream method.
1480 debug = "--debug" in self._frozen_config.myopts
1481 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1482 sets = root_config.sets
1483 depgraph_sets = self._dynamic_config.sets[root_config.root]
1485 myroot = self._frozen_config.target_root
1486 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1487 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1488 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1489 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1490 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1491 pkgsettings = self._frozen_config.pkgsettings[myroot]
1493 onlydeps = "--onlydeps" in self._frozen_config.myopts
# Each argument is classified by file extension / prefix below:
# binary package, ebuild path, owned file path, set name, or atom.
1496 ext = os.path.splitext(x)[1]
# Binary package argument: resolve the path inside PKGDIR if needed.
1498 if not os.path.exists(x):
1500 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1501 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1502 elif os.path.exists(
1503 os.path.join(pkgsettings["PKGDIR"], x)):
1504 x = os.path.join(pkgsettings["PKGDIR"], x)
1506 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
1507 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1508 return 0, myfavorites
1509 mytbz2=portage.xpak.tbz2(x)
1510 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1511 if os.path.realpath(x) != \
1512 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1513 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
1514 return 0, myfavorites
1516 pkg = self._pkg(mykey, "binary", root_config,
1518 args.append(PackageArg(arg=x, package=pkg,
1519 root_config=root_config))
# Ebuild path argument: validate its location in the tree.
1520 elif ext==".ebuild":
1521 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1522 pkgdir = os.path.dirname(ebuild_path)
1523 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1524 cp = pkgdir[len(tree_root)+1:]
1525 e = portage.exception.PackageNotFound(
1526 ("%s is not in a valid portage tree " + \
1527 "hierarchy or does not exist") % x)
1528 if not portage.isvalidatom(cp):
1530 cat = portage.catsplit(cp)[0]
1531 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1532 if not portage.isvalidatom("="+mykey):
1534 ebuild_path = portdb.findname(mykey)
1536 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1537 cp, os.path.basename(ebuild_path)):
1538 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
1539 return 0, myfavorites
1540 if mykey not in portdb.xmatch(
1541 "match-visible", portage.cpv_getkey(mykey)):
1542 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
1543 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
1544 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
1545 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1548 raise portage.exception.PackageNotFound(
1549 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1550 pkg = self._pkg(mykey, "ebuild", root_config,
1552 args.append(PackageArg(arg=x, package=pkg,
1553 root_config=root_config))
# Absolute path argument: owner lookup against the vardb.
1554 elif x.startswith(os.path.sep):
1555 if not x.startswith(myroot):
1556 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1557 " $ROOT.\n") % x, noiselevel=-1)
1559 # Queue these up since it's most efficient to handle
1560 # multiple files in a single iter_owners() call.
1561 lookup_owners.append(x)
# Set or atom arguments.
1563 if x in ("system", "world"):
1565 if x.startswith(SETPREFIX):
1566 s = x[len(SETPREFIX):]
1568 raise portage.exception.PackageSetNotFound(s)
1569 if s in depgraph_sets.sets:
1572 depgraph_sets.sets[s] = pset
1573 args.append(SetArg(arg=x, pset=pset,
1574 root_config=root_config))
1576 if not is_valid_package_atom(x, allow_repo=True):
1577 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1579 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1580 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1582 # Don't expand categories or old-style virtuals here unless
1583 # necessary. Expansion of old-style virtuals here causes at
1584 # least the following problems:
1585 # 1) It's more difficult to determine which set(s) an atom
1586 # came from, if any.
1587 # 2) It takes away freedom from the resolver to choose other
1588 # possible expansions when necessary.
1590 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
1591 root_config=root_config))
# Category-less atom: expand it and disambiguate the result.
1593 expanded_atoms = self._dep_expand(root_config, x)
1594 installed_cp_set = set()
1595 for atom in expanded_atoms:
1596 if vardb.cp_list(atom.cp):
1597 installed_cp_set.add(atom.cp)
1599 if len(installed_cp_set) > 1:
1600 non_virtual_cps = set()
1601 for atom_cp in installed_cp_set:
1602 if not atom_cp.startswith("virtual/"):
1603 non_virtual_cps.add(atom_cp)
1604 if len(non_virtual_cps) == 1:
1605 installed_cp_set = non_virtual_cps
# Prefer the expansion matching the single installed package.
1607 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
1608 installed_cp = next(iter(installed_cp_set))
1609 for atom in expanded_atoms:
1610 if atom.cp == installed_cp:
1612 for pkg in self._iter_match_pkgs_any(
1613 root_config, atom.without_use,
1615 if not pkg.installed:
1619 expanded_atoms = [atom]
1622 # If a non-virtual package and one or more virtual packages
1623 # are in expanded_atoms, use the non-virtual package.
1624 if len(expanded_atoms) > 1:
1625 number_of_virtuals = 0
1626 for expanded_atom in expanded_atoms:
1627 if expanded_atom.cp.startswith("virtual/"):
1628 number_of_virtuals += 1
1630 candidate = expanded_atom
1631 if len(expanded_atoms) - number_of_virtuals == 1:
1632 expanded_atoms = [ candidate ]
1634 if len(expanded_atoms) > 1:
1635 writemsg("\n\n", noiselevel=-1)
1636 ambiguous_package_name(x, expanded_atoms, root_config,
1637 self._frozen_config.spinner, self._frozen_config.myopts)
1638 return False, myfavorites
1640 atom = expanded_atoms[0]
1642 null_atom = Atom(insert_category_into_atom(x, "null"))
1643 cat, atom_pn = portage.catsplit(null_atom.cp)
1644 virts_p = root_config.settings.get_virts_p().get(atom_pn)
1646 # Allow the depgraph to choose which virtual.
1647 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
1651 args.append(AtomArg(arg=x, atom=atom,
1652 root_config=root_config))
# Resolve the queued owner lookups in one pass.
1656 search_for_multiple = False
1657 if len(lookup_owners) > 1:
1658 search_for_multiple = True
1660 for x in lookup_owners:
1661 if not search_for_multiple and os.path.isdir(x):
1662 search_for_multiple = True
1663 relative_paths.append(x[len(myroot)-1:])
1666 for pkg, relative_path in \
1667 real_vardb._owners.iter_owners(relative_paths):
1668 owners.add(pkg.mycpv)
1669 if not search_for_multiple:
1673 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1674 "by any package.\n") % lookup_owners[0], noiselevel=-1)
1678 slot = vardb.aux_get(cpv, ["SLOT"])[0]
1680 # portage now masks packages with missing slot, but it's
1681 # possible that one was installed by an older version
1682 atom = Atom(portage.cpv_getkey(cpv))
1684 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
1685 args.append(AtomArg(arg=atom, atom=atom,
1686 root_config=root_config))
1688 if "--update" in self._frozen_config.myopts:
1689 # In some cases, the greedy slots behavior can pull in a slot that
1690 # the user would want to uninstall due to it being blocked by a
1691 # newer version in a different slot. Therefore, it's necessary to
1692 # detect and discard any that should be uninstalled. Each time
1693 # that arguments are updated, package selections are repeated in
1694 # order to ensure consistency with the current arguments:
1696 # 1) Initialize args
1697 # 2) Select packages and generate initial greedy atoms
1698 # 3) Update args with greedy atoms
1699 # 4) Select packages and generate greedy atoms again, while
1700 # accounting for any blockers between selected packages
1701 # 5) Update args with revised greedy atoms
1703 self._set_args(args)
1706 greedy_args.append(arg)
1707 if not isinstance(arg, AtomArg):
1709 for atom in self._greedy_slots(arg.root_config, arg.atom):
1711 AtomArg(arg=arg.arg, atom=atom,
1712 root_config=arg.root_config))
1714 self._set_args(greedy_args)
1717 # Revise greedy atoms, accounting for any blockers
1718 # between selected packages.
1719 revised_greedy_args = []
1721 revised_greedy_args.append(arg)
1722 if not isinstance(arg, AtomArg):
1724 for atom in self._greedy_slots(arg.root_config, arg.atom,
1725 blocker_lookahead=True):
1726 revised_greedy_args.append(
1727 AtomArg(arg=arg.arg, atom=atom,
1728 root_config=arg.root_config))
1729 args = revised_greedy_args
1730 del revised_greedy_args
1732 self._set_args(args)
# Record favorites (atoms for atom/package args, set names for sets).
1734 myfavorites = set(myfavorites)
1736 if isinstance(arg, (AtomArg, PackageArg)):
1737 myfavorites.add(arg.atom)
1738 elif isinstance(arg, SetArg):
1739 myfavorites.add(arg.arg)
1740 myfavorites = list(myfavorites)
1743 portage.writemsg("\n", noiselevel=-1)
1744 # Order needs to be preserved since a feature of --nodeps
1745 # is to allow the user to force a specific merge order.
1746 self._dynamic_config._initial_arg_list = args[:]
1748 return self._resolve(myfavorites)
1750 def _resolve(self, myfavorites):
1751 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1752 call self._creategraph to process their deps and return
# NOTE(review): this listing elides many original lines (numbering
# gaps); treat it as a partial excerpt of the upstream method.
1754 debug = "--debug" in self._frozen_config.myopts
1755 onlydeps = "--onlydeps" in self._frozen_config.myopts
1756 myroot = self._frozen_config.target_root
1757 pkgsettings = self._frozen_config.pkgsettings[myroot]
1758 pprovideddict = pkgsettings.pprovideddict
1759 virtuals = pkgsettings.getvirtuals()
1760 for arg in self._expand_set_args(
1761 self._dynamic_config._initial_arg_list,
1762 add_to_digraph=True):
1763 for atom in arg.pset.getAtoms():
1764 self._spinner_update()
1765 dep = Dependency(atom=atom, onlydeps=onlydeps,
1766 root=myroot, parent=arg)
# package.provided handling: note it and move on.
1768 pprovided = pprovideddict.get(atom.cp)
1769 if pprovided and portage.match_from_list(atom, pprovided):
1770 # A provided package has been specified on the command line.
1771 self._dynamic_config._pprovided_args.append((arg, atom))
1773 if isinstance(arg, PackageArg):
1774 if not self._add_pkg(arg.package, dep) or \
1775 not self._create_graph():
1776 if not self._dynamic_config._need_restart:
1777 sys.stderr.write(("\n\n!!! Problem " + \
1778 "resolving dependencies for %s\n") % \
1780 return 0, myfavorites
1783 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1784 (arg, atom), noiselevel=-1)
1785 pkg, existing_node = self._select_package(
1786 myroot, atom, onlydeps=onlydeps)
# No package selected: try old-style virtual expansions against
# package.provided before reporting the atom unsatisfied.
1788 pprovided_match = False
1789 for virt_choice in virtuals.get(atom.cp, []):
1790 expanded_atom = portage.dep.Atom(
1791 atom.replace(atom.cp, virt_choice.cp, 1))
1792 pprovided = pprovideddict.get(expanded_atom.cp)
1794 portage.match_from_list(expanded_atom, pprovided):
1795 # A provided package has been
1796 # specified on the command line.
1797 self._dynamic_config._pprovided_args.append((arg, atom))
1798 pprovided_match = True
1803 if not (isinstance(arg, SetArg) and \
1804 arg.name in ("selected", "system", "world")):
1805 self._dynamic_config._unsatisfied_deps_for_display.append(
1806 ((myroot, atom), {"myparent" : arg}))
1807 return 0, myfavorites
1808 self._dynamic_config._missing_args.append((arg, atom))
1810 if atom.cp != pkg.cp:
1811 # For old-style virtuals, we need to repeat the
1812 # package.provided check against the selected package.
1813 expanded_atom = atom.replace(atom.cp, pkg.cp)
1814 pprovided = pprovideddict.get(pkg.cp)
1816 portage.match_from_list(expanded_atom, pprovided):
1817 # A provided package has been
1818 # specified on the command line.
1819 self._dynamic_config._pprovided_args.append((arg, atom))
1821 if pkg.installed and "selective" not in self._dynamic_config.myparams:
1822 self._dynamic_config._unsatisfied_deps_for_display.append(
1823 ((myroot, atom), {"myparent" : arg}))
1824 # Previous behavior was to bail out in this case, but
1825 # since the dep is satisfied by the installed package,
1826 # it's more friendly to continue building the graph
1827 # and just show a warning message. Therefore, only bail
1828 # out here if the atom is not from either the system or
1830 if not (isinstance(arg, SetArg) and \
1831 arg.name in ("selected", "system", "world")):
1832 return 0, myfavorites
1834 # Add the selected package to the graph as soon as possible
1835 # so that later dep_check() calls can use it as feedback
1836 # for making more consistent atom selections.
1837 if not self._add_pkg(pkg, dep):
1838 if self._dynamic_config._need_restart:
1840 elif isinstance(arg, SetArg):
1841 sys.stderr.write(("\n\n!!! Problem resolving " + \
1842 "dependencies for %s from %s\n") % \
1845 sys.stderr.write(("\n\n!!! Problem resolving " + \
1846 "dependencies for %s\n") % atom)
1847 return 0, myfavorites
1849 except portage.exception.MissingSignature as e:
1850 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1851 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1852 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1853 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1854 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1855 return 0, myfavorites
1856 except portage.exception.InvalidSignature as e:
1857 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1858 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1859 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1860 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1861 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1862 return 0, myfavorites
1863 except SystemExit as e:
1864 raise # Needed else can't exit
1865 except Exception as e:
1866 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
1867 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
1870 # Now that the root packages have been added to the graph,
1871 # process the dependencies.
1872 if not self._create_graph():
1873 return 0, myfavorites
# --usepkgonly sanity check: every merge node must be a binary package.
1876 if "--usepkgonly" in self._frozen_config.myopts:
1877 for xs in self._dynamic_config.digraph.all_nodes():
1878 if not isinstance(xs, Package):
1880 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1882 writemsg("\n", noiselevel=-1)
1884 writemsg("Missing binary for: %s\n" % xs[2], noiselevel=-1)
1888 except self._unknown_internal_error:
1889 return False, myfavorites
# Autounmask: a graph needing keyword/USE/license changes is a failure.
1891 if set(self._dynamic_config.digraph).intersection( \
1892 self._dynamic_config._needed_unstable_keywords) or \
1893 set(self._dynamic_config.digraph).intersection( \
1894 self._dynamic_config._needed_use_config_changes) or \
1895 set(self._dynamic_config.digraph).intersection( \
1896 self._dynamic_config._needed_license_changes) :
1897 #We failed if the user needs to change the configuration
1899 self._dynamic_config._success_without_autounmask = True
1900 return False, myfavorites
1902 # We're true here unless we are missing binaries.
1903 return (not missing,myfavorites)
1905 def _set_args(self, args):
# NOTE(review): docstring delimiters and some lines are elided from
# this excerpt (numbering gaps); partial listing.
1907 Create the "__non_set_args__" package set from atoms and packages given as
1908 arguments. This method can be called multiple times if necessary.
1909 The package selection cache is automatically invalidated, since
1910 arguments influence package selections.
# Reset per-root argument state before repopulating it.
1915 for root in self._dynamic_config.sets:
1916 depgraph_sets = self._dynamic_config.sets[root]
1917 depgraph_sets.sets.setdefault('__non_set_args__',
1918 InternalPackageSet(allow_repo=True)).clear()
1919 depgraph_sets.atoms.clear()
1920 depgraph_sets.atom_arg_map.clear()
1921 set_atoms[root] = []
1922 non_set_atoms[root] = []
1924 # We don't add set args to the digraph here since that
1925 # happens at a later stage and we don't want to make
1926 # any state changes here that aren't reversed by
1927 # another call to this method.
1928 for arg in self._expand_set_args(args, add_to_digraph=False):
1929 atom_arg_map = self._dynamic_config.sets[
1930 arg.root_config.root].atom_arg_map
1931 if isinstance(arg, SetArg):
1932 atom_group = set_atoms[arg.root_config.root]
1934 atom_group = non_set_atoms[arg.root_config.root]
1936 for atom in arg.pset.getAtoms():
1937 atom_group.append(atom)
1938 atom_key = (atom, arg.root_config.root)
1939 refs = atom_arg_map.get(atom_key)
1942 atom_arg_map[atom_key] = refs
# Publish the collected atoms back into the per-root depgraph sets.
1946 for root in self._dynamic_config.sets:
1947 depgraph_sets = self._dynamic_config.sets[root]
1948 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
1949 non_set_atoms.get(root, [])))
1950 depgraph_sets.sets['__non_set_args__'].update(
1951 non_set_atoms.get(root, []))
1953 # Invalidate the package selection cache, since
1954 # arguments influence package selections.
1955 self._dynamic_config._highest_pkg_cache.clear()
1956 for trees in self._dynamic_config._filtered_trees.values():
1957 trees["porttree"].dbapi._clear_cache()
1959 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# NOTE(review): gapped listing -- missing lines include the early
# return when highest_pkg is None (~1969), the `slots` set and
# `greedy_pkgs`/`blockers` initializations, the loop over `slots`
# (~1983), and several `continue`/`try:` lines. Confirm against the
# full source before editing.
1961 Return a list of slot atoms corresponding to installed slots that
1962 differ from the slot of the highest visible match. When
1963 blocker_lookahead is True, slot atoms that would trigger a blocker
1964 conflict are automatically discarded, potentially allowing automatic
1965 uninstallation of older slots when appropriate.
1967 highest_pkg, in_graph = self._select_package(root_config.root, atom)
1968 if highest_pkg is None:
# Collect the SLOTs of installed instances sharing highest_pkg's cp.
1970 vardb = root_config.trees["vartree"].dbapi
1972 for cpv in vardb.match(atom):
1973 # don't mix new virtuals with old virtuals
1974 if portage.cpv_getkey(cpv) == highest_pkg.cp:
1975 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
1977 slots.add(highest_pkg.metadata["SLOT"])
# The highest match's own slot is not "greedy" -- drop it, then pick
# the best package for each remaining slot.
1981 slots.remove(highest_pkg.metadata["SLOT"])
1984 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1985 pkg, in_graph = self._select_package(root_config.root, slot_atom)
# Only keep lower-version packages of the same cp as slot candidates.
1986 if pkg is not None and \
1987 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
1988 greedy_pkgs.append(pkg)
1991 if not blocker_lookahead:
1992 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: build an InternalPackageSet of blocker atoms for
# each candidate (and for highest_pkg) from its *DEPEND strings.
1995 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1996 for pkg in greedy_pkgs + [highest_pkg]:
1997 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1999 selected_atoms = self._select_atoms(
2000 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2001 parent=pkg, strict=True)
2002 except portage.exception.InvalidDependString:
2005 for atoms in selected_atoms.values():
2006 blocker_atoms.extend(x for x in atoms if x.blocker)
2007 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2009 if highest_pkg not in blockers:
2012 # filter packages with invalid deps
2013 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2015 # filter packages that conflict with highest_pkg
2016 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2017 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2018 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2023 # If two packages conflict, discard the lower version.
2024 discard_pkgs = set()
2025 greedy_pkgs.sort(reverse=True)
# Pairwise blocker check over the version-sorted candidates; pkg2 is
# always the lower version within a pair, so it is the one discarded.
2026 for i in range(len(greedy_pkgs) - 1):
2027 pkg1 = greedy_pkgs[i]
2028 if pkg1 in discard_pkgs:
2030 for j in range(i + 1, len(greedy_pkgs)):
2031 pkg2 = greedy_pkgs[j]
2032 if pkg2 in discard_pkgs:
2034 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2035 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2037 discard_pkgs.add(pkg2)
2039 return [pkg.slot_atom for pkg in greedy_pkgs \
2040 if pkg not in discard_pkgs]
2042 def _select_atoms_from_graph(self, *pargs, **kwargs):
2044 Prefer atoms matching packages that have already been
2045 added to the graph or those that are installed and have
2046 not been scheduled for replacement.
2048 kwargs["trees"] = self._dynamic_config._graph_trees
2049 return self._select_atoms_highest_available(*pargs, **kwargs)
2051 def _select_atoms_highest_available(self, root, depstring,
2052 myuse=None, parent=None, strict=True, trees=None, priority=None):
2053 """This will raise InvalidDependString if necessary. If trees is
2054 None then self._dynamic_config._filtered_trees is used."""
# NOTE(review): gapped listing -- the `if trees is None:` guard
# (~2057) and the try/finally framing around dep_check (~2067/2076)
# are absent here; the pop/del cleanup below presumably runs in a
# `finally:` block. Confirm against the full source.
2056 pkgsettings = self._frozen_config.pkgsettings[root]
2058 trees = self._dynamic_config._filtered_trees
2059 mytrees = trees[root]
2060 atom_graph = digraph()
2062 # Temporarily disable autounmask so that || preferences
2063 # account for masking and USE settings.
2064 _autounmask_backup = self._dynamic_config._autounmask
2065 self._dynamic_config._autounmask = False
2066 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
# dep_check communicates through extra keys stashed in trees[root];
# they are removed again right after the call.
2068 if parent is not None:
2069 trees[root]["parent"] = parent
2070 trees[root]["atom_graph"] = atom_graph
2071 if priority is not None:
2072 trees[root]["priority"] = priority
2073 mycheck = portage.dep_check(depstring, None,
2074 pkgsettings, myuse=myuse,
2075 myroot=root, trees=trees)
# Cleanup: restore autounmask and strip the temporary keys.
2077 self._dynamic_config._autounmask = _autounmask_backup
2078 del mytrees["pkg_use_enabled"]
2079 if parent is not None:
2080 trees[root].pop("parent")
2081 trees[root].pop("atom_graph")
2082 if priority is not None:
2083 trees[root].pop("priority")
2085 raise portage.exception.InvalidDependString(mycheck[1])
# With no parent, return the flat atom list; with a parent, map each
# package (or the parent) to the chosen atoms from the atom graph.
2087 selected_atoms = mycheck[1]
2089 chosen_atoms = frozenset(mycheck[1])
2090 selected_atoms = {parent : []}
2091 for node in atom_graph:
2092 if isinstance(node, Atom):
# Non-Atom nodes are (pkg, virt_atom) pairs for virtual expansion.
2097 pkg, virt_atom = node
2098 if virt_atom not in chosen_atoms:
2100 if not portage.match_from_list(virt_atom, [pkg]):
2101 # Typically this means that the atom
2102 # specifies USE deps that are unsatisfied
2103 # by the selected package. The caller will
2104 # record this as an unsatisfied dependency
2108 selected_atoms[pkg] = [atom for atom in \
2109 atom_graph.child_nodes(node) if atom in chosen_atoms]
2111 return selected_atoms
2113 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2114 check_backtrack=False):
# Explain to the user why `atom` could not be satisfied: collects
# mask reasons and USE/IUSE mismatches for every candidate package,
# then prints the most helpful subset plus the parent chain.
# NOTE(review): gapped listing -- missing lines include several local
# initializations (missing_use, msg, node), `try:`/`else:`/`continue`
# lines, and the check_backtrack early-return path. Confirm against
# the full source before editing.
2116 When check_backtrack=True, no output is produced and
2117 the method either returns or raises _backtrack_mask if
2118 a matching package has been masked by backtracking.
2120 backtrack_mask = False
2121 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
2122 xinfo = '"%s"' % atom.unevaluated_atom
2125 # Discard null/ from failed cpv_expand category expansion.
2126 xinfo = xinfo.replace("null/", "")
2127 masked_packages = []
2129 masked_pkg_instances = set()
2130 missing_licenses = []
2131 have_eapi_mask = False
2132 pkgsettings = self._frozen_config.pkgsettings[root]
2133 root_config = self._frozen_config.roots[root]
2134 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2135 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Pass 1: gather mask info for every cpv matching the atom (without
# USE deps) from every db / repo.
2136 for db, pkg_type, built, installed, db_keys in dbs:
2140 if hasattr(db, "xmatch"):
2141 cpv_list = db.xmatch("match-all", atom.without_use)
2143 cpv_list = db.match(atom.without_use)
2145 if atom.repo is None and hasattr(db, "getRepositories"):
2146 repo_list = db.getRepositories()
2148 repo_list = [atom.repo]
2152 for cpv in cpv_list:
2153 for repo in repo_list:
2154 if not db.cpv_exists(cpv, myrepo=repo):
2157 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2158 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2160 if metadata is not None:
2161 pkg = self._pkg(cpv, pkg_type, root_config,
2162 installed=installed, myrepo=repo)
2163 # pkg.metadata contains calculated USE for ebuilds,
2164 # required later for getMissingLicenses.
2165 metadata = pkg.metadata
2166 if pkg.cp != atom.cp:
2167 # A cpv can be returned from dbapi.match() as an
2168 # old-style virtual match even in cases when the
2169 # package does not actually PROVIDE the virtual.
2170 # Filter out any such false matches here.
2171 if not atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
2173 if pkg in self._dynamic_config._runtime_pkg_mask:
2174 backtrack_reasons = \
2175 self._dynamic_config._runtime_pkg_mask[pkg]
2176 mreasons.append('backtracking: %s' % \
2177 ', '.join(sorted(backtrack_reasons)))
2178 backtrack_mask = True
2179 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2180 modified_use=self._pkg_use_enabled(pkg)):
2181 mreasons = ["exclude option"]
2183 masked_pkg_instances.add(pkg)
2184 if atom.unevaluated_atom.use:
# Track candidates rejected only because of USE-dep mismatches.
2186 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2187 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2188 missing_use.append(pkg)
2192 writemsg("violated_conditionals raised " + \
2193 "InvalidAtom: '%s' parent: %s" % \
2194 (atom, myparent), noiselevel=-1)
2196 if pkg.built and not mreasons:
2197 mreasons = ["use flag configuration mismatch"]
2198 masked_packages.append(
2199 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
# check_backtrack mode: signal via exception instead of printing.
2203 raise self._backtrack_mask()
# Pass 2: turn USE mismatches into human-readable suggestions.
2207 missing_use_reasons = []
2208 missing_iuse_reasons = []
2209 for pkg in missing_use:
2210 use = self._pkg_use_enabled(pkg)
2212 #Use the unevaluated atom here, because some flags might have gone
2213 #lost during evaluation.
2214 required_flags = atom.unevaluated_atom.use.required
2215 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2219 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2220 missing_iuse_reasons.append((pkg, mreasons))
2222 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2223 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
# Flags forced or masked by the profile cannot be changed by the user.
2225 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
2226 pkgsettings.setcpv(pkg)
2227 untouchable_flags = \
2228 frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
2229 if untouchable_flags.intersection(
2230 chain(need_enable, need_disable)):
# Warn if the suggested USE change would break REQUIRED_USE.
2233 required_use = pkg.metadata["REQUIRED_USE"]
2234 required_use_warning = ""
2236 old_use = self._pkg_use_enabled(pkg)
2237 new_use = set(self._pkg_use_enabled(pkg))
2238 for flag in need_enable:
2240 for flag in need_disable:
2241 new_use.discard(flag)
2242 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2243 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2244 required_use_warning = ", this change violates use flag constraints " + \
2245 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2247 if need_enable or need_disable:
2249 changes.extend(colorize("red", "+" + x) \
2250 for x in need_enable)
2251 changes.extend(colorize("blue", "-" + x) \
2252 for x in need_disable)
2253 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2254 missing_use_reasons.append((pkg, mreasons))
2256 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2257 # Lets see if the violated use deps are conditional.
2258 # If so, suggest to change them on the parent.
2260 # If the child package is masked then a change to
2261 # parent USE is not a valid solution (a normal mask
2262 # message should be displayed instead).
2263 if pkg in masked_pkg_instances:
2267 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2268 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2269 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2270 #all violated use deps are conditional
2272 conditional = violated_atom.use.conditional
2273 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2274 conditional.enabled, conditional.disabled))
2276 pkgsettings = self._frozen_config.pkgsettings[myparent.root]
2277 pkgsettings.setcpv(myparent)
2278 untouchable_flags = \
2279 frozenset(chain(pkgsettings.usemask, pkgsettings.useforce))
2280 if untouchable_flags.intersection(involved_flags):
2283 required_use = myparent.metadata["REQUIRED_USE"]
2284 required_use_warning = ""
2286 old_use = self._pkg_use_enabled(myparent)
2287 new_use = set(self._pkg_use_enabled(myparent))
2288 for flag in involved_flags:
2290 new_use.discard(flag)
2293 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2294 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2295 required_use_warning = ", this change violates use flag constraints " + \
2296 "defined by %s: '%s'" % (myparent.cpv, \
2297 human_readable_required_use(required_use))
# Suggest toggling each involved flag on the parent package.
2299 for flag in involved_flags:
2300 if flag in self._pkg_use_enabled(myparent):
2301 changes.append(colorize("blue", "-" + flag))
2303 changes.append(colorize("red", "+" + flag))
2304 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2305 if (myparent, mreasons) not in missing_use_reasons:
2306 missing_use_reasons.append((myparent, mreasons))
# Only unmasked candidates are worth a USE-change suggestion.
2308 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2309 in missing_use_reasons if pkg not in masked_pkg_instances]
2311 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2312 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2314 show_missing_use = False
2315 if unmasked_use_reasons:
2316 # Only show the latest version.
2317 show_missing_use = []
2319 parent_reason = None
2320 for pkg, mreasons in unmasked_use_reasons:
2322 if parent_reason is None:
2323 #This happens if a use change on the parent
2324 #leads to a satisfied conditional use dep.
2325 parent_reason = (pkg, mreasons)
2326 elif pkg_reason is None:
2327 #Don't rely on the first pkg in unmasked_use_reasons,
2328 #being the highest version of the dependency.
2329 pkg_reason = (pkg, mreasons)
2331 show_missing_use.append(pkg_reason)
2333 show_missing_use.append(parent_reason)
2335 elif unmasked_iuse_reasons:
2336 masked_with_iuse = False
2337 for pkg in masked_pkg_instances:
2338 #Use atom.unevaluated here, because some flags might have gone
2339 #lost during evaluation.
2340 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2341 # Package(s) with required IUSE are masked,
2342 # so display a normal masking message.
2343 masked_with_iuse = True
2345 if not masked_with_iuse:
2346 show_missing_use = unmasked_iuse_reasons
# Output section: USE suggestions, else mask reasons, else "no
# ebuilds" -- then the parent/argument chain that wanted this atom.
2350 if show_missing_use:
2351 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2352 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
2353 for pkg, mreasons in show_missing_use:
2354 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
2356 elif masked_packages:
2357 writemsg_stdout("\n!!! " + \
2358 colorize("BAD", "All ebuilds that could satisfy ") + \
2359 colorize("INFORM", xinfo) + \
2360 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
2361 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
2362 have_eapi_mask = show_masked_packages(masked_packages)
2364 writemsg_stdout("\n", noiselevel=-1)
2365 msg = ("The current version of portage supports " + \
2366 "EAPI '%s'. You must upgrade to a newer version" + \
2367 " of portage before EAPI masked packages can" + \
2368 " be installed.") % portage.const.EAPI
2369 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
2370 writemsg_stdout("\n", noiselevel=-1)
2373 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
2375 # Show parent nodes and the argument that pulled them in.
2376 traversed_nodes = set()
# NOTE(review): the initializations of `node` and `msg` (~2377-2378)
# are absent from this listing.
2379 while node is not None:
2380 traversed_nodes.add(node)
2381 if isinstance(node, DependencyArg):
2382 msg.append('(dependency required by "%s")' % \
2383 colorize('INFORM', _unicode_decode("%s") % (node,)))
2385 msg.append('(dependency required by "%s" [%s])' % \
2386 (colorize('INFORM', _unicode_decode("%s") % \
2387 (node.cpv,)), node.type_name))
2389 if node not in self._dynamic_config.digraph:
2390 # The parent is not in the graph due to backtracking.
2393 # When traversing to parents, prefer arguments over packages
2394 # since arguments are root nodes. Never traverse the same
2395 # package twice, in order to prevent an infinite loop.
2396 selected_parent = None
2397 for parent in self._dynamic_config.digraph.parent_nodes(node):
2398 if parent in traversed_nodes:
2400 if isinstance(parent, DependencyArg):
2401 if self._dynamic_config.digraph.parent_nodes(parent):
2402 selected_parent = parent
2404 msg.append('(dependency required by "%s" [argument])' % \
2405 colorize('INFORM', _unicode_decode("%s") % (parent,)))
2406 selected_parent = None
2409 selected_parent = parent
2410 node = selected_parent
2411 writemsg_stdout("\n".join(msg), noiselevel=-1)
2412 writemsg_stdout("\n", noiselevel=-1)
2416 writemsg_stdout("\n", noiselevel=-1)
# Generator: yield matching Package instances from every db type
# (ebuild, binary, installed) configured for this root.
# NOTE(review): the inner loop's body (~2423, presumably `yield pkg`)
# is absent from this listing; confirm against the full source.
2418 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
2419 for db, pkg_type, built, installed, db_keys in \
2420 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2421 for pkg in self._iter_match_pkgs(root_config,
2422 pkg_type, atom, onlydeps=onlydeps):
2425 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
# NOTE(review): gapped listing -- `else:` lines, `continue`s, the
# `try:` around self._pkg, and the final `yield pkg` (~2504-2505)
# are absent; confirm against the full source before editing.
2427 Iterate over Package instances of pkg_type matching the given atom.
2428 This does not check visibility and it also does not match USE for
2429 unbuilt ebuilds since USE are lazily calculated after visibility
2430 checks (to avoid the expense when possible).
2433 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
2435 if hasattr(db, "xmatch"):
2436 cpv_list = db.xmatch("match-all", atom)
2438 cpv_list = db.match(atom)
2440 # USE=multislot can make an installed package appear as if
2441 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2442 # won't do any good as long as USE=multislot is enabled since
2443 # the newly built package still won't have the expected slot.
2444 # Therefore, assume that such SLOT dependencies are already
2445 # satisfied rather than forcing a rebuild.
2446 installed = pkg_type == 'installed'
2447 if installed and not cpv_list and atom.slot:
2448 for cpv in db.match(atom.cp):
2449 slot_available = False
# Is the wanted slot provided by any other db for this root?
2450 for other_db, other_type, other_built, \
2451 other_installed, other_keys in \
2452 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
2455 other_db.aux_get(cpv, ["SLOT"])[0]:
2456 slot_available = True
2460 if not slot_available:
2462 inst_pkg = self._pkg(cpv, "installed",
2463 root_config, installed=installed, myrepo = atom.repo)
2464 # Remove the slot from the atom and verify that
2465 # the package matches the resulting atom.
2466 atom_without_slot = portage.dep.remove_slot(atom)
2468 atom_without_slot += str(atom.use)
2469 atom_without_slot = portage.dep.Atom(atom_without_slot)
2470 if portage.match_from_list(
2471 atom_without_slot, [inst_pkg]):
2472 cpv_list = [inst_pkg.cpv]
2476 if atom.repo is None and hasattr(db, "getRepositories"):
2477 repo_list = db.getRepositories()
2479 repo_list = [atom.repo]
2483 for cpv in cpv_list:
2484 for repo in repo_list:
2485 if not db.cpv_exists(cpv, myrepo=repo):
2489 pkg = self._pkg(cpv, pkg_type, root_config,
2490 installed=installed, onlydeps=onlydeps, myrepo=repo)
2491 except portage.exception.PackageNotFound:
2494 # A cpv can be returned from dbapi.match() as an
2495 # old-style virtual match even in cases when the
2496 # package does not actually PROVIDE the virtual.
2497 # Filter out any such false matches here.
2499 # Make sure that cpv from the current repo satisfies the atom.
2500 # This might not be the case if there are several repos with
2501 # the same cpv, but different metadata keys, like SLOT.
2502 if not InternalPackageSet(initial_atoms=(atom,), allow_repo=True,
2503 ).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
# Cached wrapper around _select_pkg_highest_available_imp. On a cache
# hit it may refresh the `existing` slot mapping; after computing a
# fresh result it records visible packages in _visible_pkgs.
# NOTE(review): gapped listing -- the unpacking of `ret` into
# (pkg, existing), the cache-hit `return ret`, and the `if pkg is not
# None:` guard before line 2524 are absent; confirm against the full
# source.
2507 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2508 cache_key = (root, atom, onlydeps)
2509 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
2512 if pkg and not existing:
2513 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2514 if existing and existing == pkg:
2515 # Update the cache to reflect that the
2516 # package has been added to the graph.
2518 self._dynamic_config._highest_pkg_cache[cache_key] = ret
2520 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2521 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Record the selected package as visible unless it is an installed
# package whose keywords are missing.
2524 settings = pkg.root_config.settings
2525 if self._pkg_visibility_check(pkg) and not (pkg.installed and \
2526 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2527 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
2530 def _want_installed_pkg(self, pkg):
# NOTE(review): gapped listing -- the docstring delimiters and the
# actual return statements (~2541, 2543-2546) are absent; confirm the
# True/False paths against the full source before editing.
2532 Given an installed package returned from select_pkg, return
2533 True if the user has not explicitly requested for this package
2534 to be replaced (typically via an atom on the command line).
# Only non-selective resolution on the target root can force a
# replacement; probe whether any argument atom matches this package.
2536 if "selective" not in self._dynamic_config.myparams and \
2537 pkg.root == self._frozen_config.target_root:
2539 next(self._iter_atoms_for_pkg(pkg))
2540 except StopIteration:
2542 except portage.exception.InvalidDependString:
# Select the best package for (root, atom); when autounmask is
# enabled and the plain selection is unsatisfying, retry while
# allowing USE changes first, then also keyword/license changes.
# NOTE(review): gapped listing -- the conditions joined by the `and \`
# continuations on lines 2554-2556 and 2570-2572, the `break`s, and
# the assignment target of the retry call (~2563) are absent; confirm
# against the full source.
2548 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2549 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2551 default_selection = (pkg, existing)
2553 if self._dynamic_config._autounmask is True:
2554 if pkg is not None and \
2556 not self._want_installed_pkg(pkg):
# First pass: only USE changes; second pass: also unstable keywords
# and license changes.
2559 for only_use_changes in True, False:
2564 self._wrapped_select_pkg_highest_available_imp(
2565 root, atom, onlydeps=onlydeps,
2566 allow_use_changes=True,
2567 allow_unstable_keywords=(not only_use_changes),
2568 allow_license_changes=(not only_use_changes))
2570 if pkg is not None and \
2572 not self._want_installed_pkg(pkg):
2575 if self._dynamic_config._need_restart:
2579 # This ensures that we can fall back to an installed package
2580 # that may have been rejected in the autounmask path above.
2581 return default_selection
2583 return pkg, existing
2585 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False):
# Visibility check with optional autounmask: if the only mask reasons
# are unstable keywords and/or licenses and the corresponding allow_*
# flag is set, record the needed change (and backtrack info) and
# treat the package as visible.
# NOTE(review): gapped listing -- the initial `if pkg.visible:` fast
# path (~2586-2589), several `return` statements, and the
# `if hint is None ...` branch heads (~2603-2604, 2610) are absent;
# confirm the exact return values against the full source.
2590 if self._frozen_config.myopts.get('--autounmask', 'n') is not True:
2593 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
2594 root_config = self._frozen_config.roots[pkg.root]
2595 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
2597 masked_by_unstable_keywords = False
2598 missing_licenses = None
2599 masked_by_something_else = False
# Classify each mask reason by its unmask hint.
2601 for reason in mreasons:
2602 hint = reason.unmask_hint
2605 masked_by_something_else = True
2606 elif hint.key == "unstable keyword":
2607 masked_by_unstable_keywords = True
2608 elif hint.key == "license":
2609 missing_licenses = hint.value
2611 masked_by_something_else = True
2613 if masked_by_something_else:
# Discount masks that earlier autounmask decisions already lifted.
2616 if pkg in self._dynamic_config._needed_unstable_keywords:
2617 #If the package is already keyworded, remove the mask.
2618 masked_by_unstable_keywords = False
2620 if missing_licenses:
2621 #If the needed licenses are already unmasked, remove the mask.
2622 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
2624 if not (masked_by_unstable_keywords or missing_licenses):
2625 #Package has already been unmasked.
2628 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
2629 (missing_licenses and not allow_license_changes):
2630 #We are not allowed to do the needed changes.
# Record the accepted changes so a backtracking run can replay them.
2633 if masked_by_unstable_keywords:
2634 self._dynamic_config._needed_unstable_keywords.add(pkg)
2635 backtrack_infos = self._dynamic_config._backtrack_infos
2636 backtrack_infos.setdefault("config", {})
2637 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
2638 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
2641 if missing_licenses:
2642 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
2643 backtrack_infos = self._dynamic_config._backtrack_infos
2644 backtrack_infos.setdefault("config", {})
2645 backtrack_infos["config"].setdefault("needed_license_changes", set())
2646 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
2650 def _pkg_use_enabled(self, pkg, target_use=None):
# NOTE(review): gapped listing -- docstring delimiters, the
# `new_use = set()` style initialization (~2672-2675), `else:`
# branches, `continue`s, and some `return` lines are absent; confirm
# against the full source before editing.
2652 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
2653 If target_use is given, the need changes are computed to make the package useable.
2654 Example: target_use = { "foo": True, "bar": False }
2655 The flags target_use must be in the pkg's IUSE.
2657 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
2659 if target_use is None:
2660 if needed_use_config_change is None:
2661 return pkg.use.enabled
2663 return needed_use_config_change[0]
# Start from any previously computed change set so repeated calls
# accumulate consistently.
2665 if needed_use_config_change is not None:
2666 old_use = needed_use_config_change[0]
2668 old_changes = needed_use_config_change[1]
2669 new_changes = old_changes.copy()
2671 old_use = pkg.use.enabled
# Fold the requested target_use states into the change dict.
2676 for flag, state in target_use.items():
2678 if flag not in old_use:
2679 if new_changes.get(flag) == False:
2681 new_changes[flag] = True
2685 if new_changes.get(flag) == True:
2687 new_changes[flag] = False
# Flags not mentioned in target_use keep their old state.
2688 new_use.update(old_use.difference(target_use))
# Local helper: a restart is needed only when the USE change alters
# this pkg's dep/license strings or intersects a parent's USE deps.
2690 def want_restart_for_use_change(pkg, new_use):
2691 if pkg not in self._dynamic_config.digraph.nodes:
2694 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
2695 dep = pkg.metadata[key]
2696 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2697 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
2699 if old_val != new_val:
2702 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
2703 if not parent_atoms:
2706 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
2707 for ppkg, atom in parent_atoms:
2708 if not atom.use or \
2709 not atom.use.required.intersection(changes):
2716 if new_changes != old_changes:
2717 #Don't do the change if it violates REQUIRED_USE.
2718 required_use = pkg.metadata["REQUIRED_USE"]
2719 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2720 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# Persist the change and expose it to the backtracker; flag a restart
# when the change affects already-computed dependencies.
2723 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
2724 backtrack_infos = self._dynamic_config._backtrack_infos
2725 backtrack_infos.setdefault("config", {})
2726 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
2727 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
2728 if want_restart_for_use_change(pkg, new_use):
2729 self._dynamic_config._need_restart = True
2732 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
2733 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False):
2734 root_config = self._frozen_config.roots[root]
2735 pkgsettings = self._frozen_config.pkgsettings[root]
2736 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2737 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2738 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2739 # List of acceptable packages, ordered by type preference.
2740 matched_packages = []
2741 matched_pkgs_ignore_use = []
2742 highest_version = None
2743 if not isinstance(atom, portage.dep.Atom):
2744 atom = portage.dep.Atom(atom)
2746 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
2747 existing_node = None
2749 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
2750 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
2751 empty = "empty" in self._dynamic_config.myparams
2752 selective = "selective" in self._dynamic_config.myparams
2754 noreplace = "--noreplace" in self._frozen_config.myopts
2755 avoid_update = "--update" not in self._frozen_config.myopts
2756 dont_miss_updates = "--update" in self._frozen_config.myopts
2757 use_ebuild_visibility = self._frozen_config.myopts.get(
2758 '--use-ebuild-visibility', 'n') != 'n'
2759 # Behavior of the "selective" parameter depends on
2760 # whether or not a package matches an argument atom.
2761 # If an installed package provides an old-style
2762 # virtual that is no longer provided by an available
2763 # package, the installed package may match an argument
2764 # atom even though none of the available packages do.
2765 # Therefore, "selective" logic does not consider
2766 # whether or not an installed package matches an
2767 # argument atom. It only considers whether or not
2768 # available packages match argument atoms, which is
2769 # represented by the found_available_arg flag.
2770 found_available_arg = False
2771 packages_with_invalid_use_config = []
2772 for find_existing_node in True, False:
2775 for db, pkg_type, built, installed, db_keys in dbs:
2778 if installed and not find_existing_node:
2779 want_reinstall = reinstall or empty or \
2780 (found_available_arg and not selective)
2781 if want_reinstall and matched_packages:
2784 # Ignore USE deps for the initial match since we want to
2785 # ensure that updates aren't missed solely due to the user's
2786 # USE configuration.
2787 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
2789 if pkg in self._dynamic_config._runtime_pkg_mask:
2790 # The package has been masked by the backtracking logic
2793 if not pkg.installed and \
2794 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2795 modified_use=self._pkg_use_enabled(pkg)):
2798 if dont_miss_updates:
2799 # Check if a higher version was rejected due to user
2800 # USE configuration. The packages_with_invalid_use_config
2801 # list only contains unbuilt ebuilds since USE can't
2802 # be changed for built packages.
2803 higher_version_rejected = False
2804 for rejected in packages_with_invalid_use_config:
2805 if rejected.cp != pkg.cp:
2808 higher_version_rejected = True
2810 if higher_version_rejected:
2814 # Make --noreplace take precedence over --newuse.
2815 if not pkg.installed and noreplace and \
2816 cpv in vardb.match(atom):
2817 inst_pkg = self._pkg(pkg.cpv, "installed",
2818 root_config, installed=True)
2819 if inst_pkg.visible:
2820 # If the installed version is masked, it may
2821 # be necessary to look at lower versions,
2822 # in case there is a visible downgrade.
2824 reinstall_for_flags = None
2826 if not pkg.installed or \
2827 (matched_packages and not avoid_update):
2828 # Only enforce visibility on installed packages
2829 # if there is at least one other visible package
2830 # available. By filtering installed masked packages
2831 # here, packages that have been masked since they
2832 # were installed can be automatically downgraded
2833 # to an unmasked version.
2835 if not self._pkg_visibility_check(pkg, \
2836 allow_unstable_keywords=allow_unstable_keywords,
2837 allow_license_changes=allow_license_changes):
2840 # Enable upgrade or downgrade to a version
2841 # with visible KEYWORDS when the installed
2842 # version is masked by KEYWORDS, but never
2843 # reinstall the same exact version only due
2844 # to a KEYWORDS mask. See bug #252167.
2845 if matched_packages:
2847 different_version = None
2848 for avail_pkg in matched_packages:
2849 if not portage.dep.cpvequal(
2850 pkg.cpv, avail_pkg.cpv):
2851 different_version = avail_pkg
2853 if different_version is not None:
2854 # If the ebuild no longer exists or it's
2855 # keywords have been dropped, reject built
2856 # instances (installed or binary).
2857 # If --usepkgonly is enabled, assume that
2858 # the ebuild status should be ignored.
2859 if not use_ebuild_visibility and usepkgonly:
2861 pkgsettings._getMissingKeywords(
2862 pkg.cpv, pkg.metadata):
2867 pkg.cpv, "ebuild", root_config, myrepo=atom.repo)
2868 except portage.exception.PackageNotFound:
2871 if not self._pkg_visibility_check(pkg_eb, \
2872 allow_unstable_keywords=allow_unstable_keywords,
2873 allow_license_changes=allow_license_changes):
2876 # Calculation of USE for unbuilt ebuilds is relatively
2877 # expensive, so it is only performed lazily, after the
2878 # above visibility checks are complete.
2881 if root == self._frozen_config.target_root:
2883 myarg = next(self._iter_atoms_for_pkg(pkg))
2884 except StopIteration:
2886 except portage.exception.InvalidDependString:
2888 # masked by corruption
2890 if not installed and myarg:
2891 found_available_arg = True
2894 if pkg.iuse.get_missing_iuse(atom.use.required):
2895 # Don't add this to packages_with_invalid_use_config
2896 # since IUSE cannot be adjusted by the user.
2899 matched_pkgs_ignore_use.append(pkg)
2900 if allow_use_changes:
2902 for flag in atom.use.enabled:
2903 target_use[flag] = True
2904 for flag in atom.use.disabled:
2905 target_use[flag] = False
2906 use = self._pkg_use_enabled(pkg, target_use)
2908 use = self._pkg_use_enabled(pkg)
2910 if atom.use.enabled.difference(use) and \
2911 atom.use.enabled.difference(use).difference(atom.use.missing_enabled.difference(pkg.iuse.all)):
2913 packages_with_invalid_use_config.append(pkg)
2915 if atom.use.disabled.intersection(use) or \
2916 atom.use.disabled.difference(pkg.iuse.all).difference(atom.use.missing_disabled):
2918 packages_with_invalid_use_config.append(pkg)
2920 elif atom.unevaluated_atom.use:
2921 #Make sure we don't miss a 'missing IUSE'.
2922 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2923 # Don't add this to packages_with_invalid_use_config
2924 # since IUSE cannot be adjusted by the user.
2927 #check REQUIRED_USE constraints
2928 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
2929 eapi_has_required_use(pkg.metadata["EAPI"]):
2930 required_use = pkg.metadata["REQUIRED_USE"]
2931 use = self._pkg_use_enabled(pkg)
2933 required_use_is_sat = check_required_use(
2934 pkg.metadata["REQUIRED_USE"], use, pkg.iuse.is_valid_flag)
2935 except portage.exception.InvalidDependString as e:
2936 portage.writemsg("!!! Invalid REQUIRED_USE specified by " + \
2937 "'%s': %s\n" % (pkg.cpv, str(e)), noiselevel=-1)
2940 if not required_use_is_sat:
2943 if pkg.cp == atom_cp:
2944 if highest_version is None:
2945 highest_version = pkg
2946 elif pkg > highest_version:
2947 highest_version = pkg
2948 # At this point, we've found the highest visible
2949 # match from the current repo. Any lower versions
2950 # from this repo are ignored, so this so the loop
2951 # will always end with a break statement below
2953 if find_existing_node:
2954 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
2957 # Use PackageSet.findAtomForPackage()
2958 # for PROVIDE support.
2959 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
2960 if highest_version and \
2961 e_pkg.cp == atom_cp and \
2962 e_pkg < highest_version and \
2963 e_pkg.slot_atom != highest_version.slot_atom:
2964 # There is a higher version available in a
2965 # different slot, so this existing node is
2969 matched_packages.append(e_pkg)
2970 existing_node = e_pkg
2972 # Compare built package to current config and
2973 # reject the built package if necessary.
2974 if built and (not installed or matched_pkgs_ignore_use) and \
2975 ("--newuse" in self._frozen_config.myopts or \
2976 "--reinstall" in self._frozen_config.myopts or \
2977 "--binpkg-respect-use" in self._frozen_config.myopts):
2978 iuses = pkg.iuse.all
2979 old_use = self._pkg_use_enabled(pkg)
2981 pkgsettings.setcpv(myeb)
2983 pkgsettings.setcpv(pkg)
2984 now_use = pkgsettings["PORTAGE_USE"].split()
2985 forced_flags = set()
2986 forced_flags.update(pkgsettings.useforce)
2987 forced_flags.update(pkgsettings.usemask)
2989 if myeb and not usepkgonly:
2990 cur_iuse = myeb.iuse.all
2991 if self._reinstall_for_flags(forced_flags,
2995 # Compare current config to installed package
2996 # and do not reinstall if possible.
2997 if not installed and \
2998 ("--newuse" in self._frozen_config.myopts or \
2999 "--reinstall" in self._frozen_config.myopts) and \
3000 cpv in vardb.match(atom):
3001 pkgsettings.setcpv(pkg)
3002 forced_flags = set()
3003 forced_flags.update(pkgsettings.useforce)
3004 forced_flags.update(pkgsettings.usemask)
3005 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3006 old_iuse = set(filter_iuse_defaults(
3007 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3008 cur_use = self._pkg_use_enabled(pkg)
3009 cur_iuse = pkg.iuse.all
3010 reinstall_for_flags = \
3011 self._reinstall_for_flags(
3012 forced_flags, old_use, old_iuse,
3014 if reinstall_for_flags:
3018 matched_packages.append(pkg)
3019 if reinstall_for_flags:
3020 self._dynamic_config._reinstall_nodes[pkg] = \
3024 if not matched_packages:
3027 if "--debug" in self._frozen_config.myopts:
3028 for pkg in matched_packages:
3029 portage.writemsg("%s %s\n" % \
3030 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3032 # Filter out any old-style virtual matches if they are
3033 # mixed with new-style virtual matches.
3035 if len(matched_packages) > 1 and \
3036 "virtual" == portage.catsplit(cp)[0]:
3037 for pkg in matched_packages:
3040 # Got a new-style virtual, so filter
3041 # out any old-style virtuals.
3042 matched_packages = [pkg for pkg in matched_packages \
3046 if existing_node is not None and \
3047 existing_node in matched_packages:
3048 return existing_node, existing_node
3050 if len(matched_packages) > 1:
3051 if rebuilt_binaries:
3054 for pkg in matched_packages:
3059 if built_pkg is not None and inst_pkg is not None:
3060 # Only reinstall if binary package BUILD_TIME is
3061 # non-empty, in order to avoid cases like to
3062 # bug #306659 where BUILD_TIME fields are missing
3063 # in local and/or remote Packages file.
3065 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3066 except (KeyError, ValueError):
3070 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3071 except (KeyError, ValueError):
3072 installed_timestamp = 0
3074 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3075 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3076 if built_timestamp and \
3077 built_timestamp > installed_timestamp and \
3078 built_timestamp >= minimal_timestamp:
3079 return built_pkg, existing_node
3081 #Don't care if the binary has an older BUILD_TIME than the installed
3082 #package. This is for closely tracking a binhost.
3083 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3085 if built_timestamp and \
3086 built_timestamp != installed_timestamp:
3087 return built_pkg, existing_node
3089 for pkg in matched_packages:
3090 if pkg.installed and pkg.invalid:
3091 matched_packages = [x for x in \
3092 matched_packages if x is not pkg]
3095 for pkg in matched_packages:
3096 if pkg.installed and self._pkg_visibility_check(pkg, \
3097 allow_unstable_keywords=allow_unstable_keywords,
3098 allow_license_changes=allow_license_changes):
3099 return pkg, existing_node
3101 bestmatch = portage.best(
3102 [pkg.cpv for pkg in matched_packages \
3103 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3104 allow_license_changes=allow_license_changes)])
3106 # all are masked, so ignore visibility
3107 bestmatch = portage.best(
3108 [pkg.cpv for pkg in matched_packages])
3109 matched_packages = [pkg for pkg in matched_packages \
3110 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3112 # ordered by type preference ("ebuild" type is the last resort)
3113 return matched_packages[-1], existing_node
# Select a package that has already been added to the depgraph (or that is
# installed and not scheduled for replacement).  Returns a (pkg, in_graph)
# pair, where in_graph is the node currently occupying pkg's slot, if any.
# NOTE(review): gaps in the embedded line numbering indicate this excerpt is
# missing lines (docstring delimiters and, presumably, an empty-`matches`
# guard before indexing matches[-1]) -- confirm against the full file.
3115 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3117 Select packages that have already been added to the graph or
3118 those that are installed and have not been scheduled for
# Match against the graph's "porttree" dbapi, which reflects graph state
# rather than the real portage tree.
3121 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3122 matches = graph_db.match_pkgs(atom)
3125 pkg = matches[-1] # highest match
3126 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3127 return pkg, in_graph
# Select only from installed packages (used in "remove" mode, e.g. depclean),
# mirroring _select_pkg_from_graph but querying the vartree instead.
# NOTE(review): embedded-numbering gaps suggest missing lines here too
# (docstring delimiters and an empty-`matches` guard) -- verify in full file.
3129 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
3131 Select packages that are installed.
3133 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3134 matches = vardb.match_pkgs(atom)
3137 pkg = matches[-1] # highest match
3138 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3139 return pkg, in_graph
# Pull deep dependencies of the required sets into the graph so that an
# upgrade cannot silently break an initially-satisfied dependency.
# NOTE(review): this excerpt is missing lines (early `return` statements,
# `else:` branches, the `for` header over installed packages, SetArg import
# context) judging from gaps in the embedded numbering -- read alongside the
# complete depgraph.py before acting on any single branch below.
3141 def _complete_graph(self, required_sets=None):
3143 Add any deep dependencies of required sets (args, system, world) that
3144 have not been pulled into the graph yet. This ensures that the graph
3145 is consistent such that initially satisfied deep dependencies are not
3146 broken in the new graph. Initially unsatisfied dependencies are
3147 irrelevant since we only want to avoid breaking dependencies that are
3148 initially satisfied.
3150 Since this method can consume enough time to disturb users, it is
3151 currently only enabled by the --complete-graph option.
3153 @param required_sets: contains required sets (currently only used
3154 for depclean and prune removal operations)
3155 @type required_sets: dict
# Bail out early for modes where graph completion is pointless or disabled.
3157 if "--buildpkgonly" in self._frozen_config.myopts or \
3158 "recurse" not in self._dynamic_config.myparams:
3161 if "complete" not in self._dynamic_config.myparams:
3162 # Skip this to avoid consuming enough time to disturb users.
3167 # Put the depgraph into a mode that causes it to only
3168 # select packages that have already been added to the
3169 # graph or those that are installed and have not been
3170 # scheduled for replacement. Also, toggle the "deep"
3171 # parameter so that all dependencies are traversed and
3173 self._select_atoms = self._select_atoms_from_graph
3174 if "remove" in self._dynamic_config.myparams:
3175 self._select_package = self._select_pkg_from_installed
3177 self._select_package = self._select_pkg_from_graph
3178 already_deep = self._dynamic_config.myparams.get("deep") is True
3179 if not already_deep:
3180 self._dynamic_config.myparams["deep"] = True
3182 # Invalidate the package selection cache, since
3183 # _select_package has just changed implementations.
3184 for trees in self._dynamic_config._filtered_trees.values():
3185 trees["porttree"].dbapi._clear_cache()
# Re-seed the dep stack from the original arguments plus required sets.
3187 args = self._dynamic_config._initial_arg_list[:]
3188 for root in self._frozen_config.roots:
3189 if root != self._frozen_config.target_root and \
3190 "remove" in self._dynamic_config.myparams:
3191 # Only pull in deps for the relevant root.
3193 depgraph_sets = self._dynamic_config.sets[root]
3194 required_set_names = self._frozen_config._required_set_names.copy()
3195 remaining_args = required_set_names.copy()
3196 if required_sets is None or root not in required_sets:
3199 # Removal actions may override sets with temporary
3200 # replacements that have had atoms removed in order
3201 # to implement --deselect behavior.
3202 required_set_names = set(required_sets[root])
3203 depgraph_sets.sets.clear()
3204 depgraph_sets.sets.update(required_sets[root])
3205 if "remove" not in self._dynamic_config.myparams and \
3206 root == self._frozen_config.target_root and \
3207 (already_deep or "empty" in self._dynamic_config.myparams):
3208 remaining_args.difference_update(depgraph_sets.sets)
# If every required set is already represented and nothing is pending,
# there is nothing left to pull in for this root.
3209 if not remaining_args and \
3210 not self._dynamic_config._ignored_deps and \
3211 not self._dynamic_config._dep_stack:
3213 root_config = self._frozen_config.roots[root]
3214 for s in required_set_names:
3215 pset = depgraph_sets.sets.get(s)
3217 pset = root_config.sets[s]
3218 atom = SETPREFIX + s
3219 args.append(SetArg(arg=atom, pset=pset,
3220 root_config=root_config))
3222 self._set_args(args)
3223 for arg in self._expand_set_args(args, add_to_digraph=True):
3224 for atom in arg.pset.getAtoms():
3225 self._dynamic_config._dep_stack.append(
3226 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps are replayed now that "deep" traversal is on.
3230 if self._dynamic_config._ignored_deps:
3231 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
3232 self._dynamic_config._ignored_deps = []
3233 if not self._create_graph(allow_unsatisfied=True):
3235 # Check the unsatisfied deps to see if any initially satisfied deps
3236 # will become unsatisfied due to an upgrade. Initially unsatisfied
3237 # deps are irrelevant since we only want to avoid breaking deps
3238 # that are initially satisfied.
3239 while self._dynamic_config._unsatisfied_deps:
3240 dep = self._dynamic_config._unsatisfied_deps.pop()
3241 vardb = self._frozen_config.roots[
3242 dep.root].trees["vartree"].dbapi
3243 matches = vardb.match_pkgs(dep.atom)
3245 self._dynamic_config._initially_unsatisfied_deps.append(dep)
3247 # An scheduled installation broke a deep dependency.
3248 # Add the installed package to the graph so that it
3249 # will be appropriately reported as a slot collision
3250 # (possibly solvable via backtracking).
3251 pkg = matches[-1] # highest match
3252 if not self._add_pkg(pkg, dep):
3254 if not self._create_graph(allow_unsatisfied=True):
# Package-instance factory with caching.  Looks up (type_name, root, cpv,
# operation, repo) in the frozen-config cache and constructs a new Package
# (reading metadata via the tree's dbapi.aux_get) on a miss.
# NOTE(review): embedded-numbering gaps show missing lines (e.g. the
# `operation = "merge"` default, `raise AssertionError`-style guard after
# line 3266, `try:` lines around aux_get, and the final `return pkg`) --
# consult the full file before relying on the control flow shown here.
3258 def _pkg(self, cpv, type_name, root_config, installed=False,
3259 onlydeps=False, myrepo = None):
3261 Get a package instance from the cache, or create a new
3262 one if necessary. Raises PackageNotFound from aux_get if it
3263 failures for some reason (package does not exist or is
# Only ebuilds are repo-differentiated; installed/binary lookups ignore repo.
3266 if type_name != "ebuild" and myrepo is None:
3270 if installed or onlydeps:
3271 operation = "nomerge"
3272 # Ensure that we use the specially optimized RootConfig instance
3273 # that refers to FakeVartree instead of the real vartree.
3274 root_config = self._frozen_config.roots[root_config.root]
3275 pkg = self._frozen_config._pkg_cache.get(
3276 (type_name, root_config.root, cpv, operation, myrepo))
3277 if pkg is None and onlydeps and not installed:
3278 # Maybe it already got pulled in as a "merge" node.
3279 pkg = self._dynamic_config.mydbapi[root_config.root].get(
3280 (type_name, root_config.root, cpv, 'merge', myrepo))
# Cache miss: read metadata from the appropriate tree and build a Package.
3283 tree_type = self.pkg_tree_map[type_name]
3284 db = root_config.trees[tree_type].dbapi
3285 db_keys = list(self._frozen_config._trees_orig[root_config.root][
3286 tree_type].dbapi._aux_cache_keys)
3288 if type_name == "ebuild" and myrepo is None:
3289 #We're asked to return a matching Package from any repo.
3290 for repo in db.getRepositories():
3291 if not db.cpv_exists(cpv, myrepo=repo):
3294 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=repo))
3299 raise portage.exception.PackageNotFound(cpv)
3302 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
3304 raise portage.exception.PackageNotFound(cpv)
3306 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
3307 installed=installed, metadata=metadata, onlydeps=onlydeps,
3308 root_config=root_config, type_name=type_name)
3310 if type_name == "ebuild":
3311 self._frozen_config._pkg_cache[pkg] = pkg
3313 self._frozen_config._pkg_cache[
3314 (pkg.type_name, pkg.root, pkg.cpv, pkg.operation, None)] = pkg
3316 #For installed and binary packages we don't care for the repo when it comes to
3317 #caching, because there can only be one cpv. So overwrite the repo key with type_name.
3318 #Make sure that .operation is computed.
3320 self._frozen_config._pkg_cache[
3321 (pkg.type_name, pkg.root, pkg.cpv, pkg.operation, pkg.type_name)] = pkg
# Track the highest package per slot that is masked solely by LICENSE, so
# the user can be advised to adjust ACCEPT_LICENSE.
3323 if not self._pkg_visibility_check(pkg) and \
3324 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
3325 slot_key = (pkg.root, pkg.slot_atom)
3326 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
3327 if other_pkg is None or pkg > other_pkg:
3328 self._frozen_config._highest_license_masked[slot_key] = pkg
# Validate every blocker against the initial (installed) and final (planned)
# package databases, pruning irrelevant blockers and adding hard ordering
# deps (uninstall-before-merge) where a block can be resolved by unmerging.
# NOTE(review): this excerpt is heavily gapped (missing `for pkg in ...`
# headers, `try:`/`else:`/`continue`/`return` lines, and cache-hit branches,
# per the embedded numbering) -- treat the visible flow as a sketch and
# consult the full depgraph.py for authoritative control flow.
3332 def _validate_blockers(self):
3333 """Remove any blockers from the digraph that do not match any of the
3334 packages within the graph. If necessary, create hard deps to ensure
3335 correct merge order such that mutually blocking packages are never
3336 installed simultaneously."""
3338 if "--buildpkgonly" in self._frozen_config.myopts or \
3339 "--nodeps" in self._frozen_config.myopts:
3342 complete = "complete" in self._dynamic_config.myparams
3343 deep = "deep" in self._dynamic_config.myparams
3346 # Pull in blockers from all installed packages that haven't already
3347 # been pulled into the depgraph. This is not enabled by default
3348 # due to the performance penalty that is incurred by all the
3349 # additional dep_check calls that are required.
3351 # For installed packages, always ignore blockers from DEPEND since
3352 # only runtime dependencies should be relevant for packages that
3353 # are already built.
3354 dep_keys = ["RDEPEND", "PDEPEND"]
3355 for myroot in self._frozen_config.trees:
3356 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
3357 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
3358 pkgsettings = self._frozen_config.pkgsettings[myroot]
3359 root_config = self._frozen_config.roots[myroot]
3360 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
3361 final_db = self._dynamic_config.mydbapi[myroot]
# BlockerCache persists per-package blocker atoms keyed by COUNTER, so
# dep_check can be skipped when nothing changed.
3363 blocker_cache = BlockerCache(myroot, vardb)
3364 stale_cache = set(blocker_cache)
3367 stale_cache.discard(cpv)
3368 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
3370 pkg in self._dynamic_config._traversed_pkg_deps
3372 # Check for masked installed packages. Only warn about
3373 # packages that are in the graph in order to avoid warning
3374 # about those that will be automatically uninstalled during
3375 # the merge process or by --depclean. Always warn about
3376 # packages masked by license, since the user likely wants
3377 # to adjust ACCEPT_LICENSE.
3379 if not self._pkg_visibility_check(pkg) and \
3380 (pkg_in_graph or 'LICENSE' in pkg.masks):
3381 self._dynamic_config._masked_installed.add(pkg)
3383 self._check_masks(pkg)
3385 blocker_atoms = None
3391 self._dynamic_config._blocker_parents.child_nodes(pkg))
3396 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
3400 # Select just the runtime blockers.
3401 blockers = [blocker for blocker in blockers \
3402 if blocker.priority.runtime or \
3403 blocker.priority.runtime_post]
3404 if blockers is not None:
3405 blockers = set(blocker.atom for blocker in blockers)
3407 # If this node has any blockers, create a "nomerge"
3408 # node for it so that they can be enforced.
3409 self._spinner_update()
3410 blocker_data = blocker_cache.get(cpv)
# Cache entries are invalidated when the vardb COUNTER changed (i.e. the
# installed instance was replaced since the cache was written).
3411 if blocker_data is not None and \
3412 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3415 # If blocker data from the graph is available, use
3416 # it to validate the cache and update the cache if
3418 if blocker_data is not None and \
3419 blockers is not None:
3420 if not blockers.symmetric_difference(
3421 blocker_data.atoms):
3425 if blocker_data is None and \
3426 blockers is not None:
3427 # Re-use the blockers from the graph.
3428 blocker_atoms = sorted(blockers)
3429 counter = long(pkg.metadata["COUNTER"])
3431 blocker_cache.BlockerData(counter, blocker_atoms)
3432 blocker_cache[pkg.cpv] = blocker_data
3436 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
3438 # Use aux_get() to trigger FakeVartree global
3439 # updates on *DEPEND when appropriate.
3440 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3441 # It is crucial to pass in final_db here in order to
3442 # optimize dep_check calls by eliminating atoms via
3443 # dep_wordreduce and dep_eval calls.
3445 success, atoms = portage.dep_check(depstr,
3446 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
3447 trees=self._dynamic_config._graph_trees, myroot=myroot)
3450 except Exception as e:
3451 # This is helpful, for example, if a ValueError
3452 # is thrown from cpv_expand due to multiple
3453 # matches (this can happen if an atom lacks a
3455 show_invalid_depstring_notice(
3456 pkg, depstr, str(e))
3460 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3461 if replacement_pkg and \
3462 replacement_pkg[0].operation == "merge":
3463 # This package is being replaced anyway, so
3464 # ignore invalid dependencies so as not to
3465 # annoy the user too much (otherwise they'd be
3466 # forced to manually unmerge it first).
3468 show_invalid_depstring_notice(pkg, depstr, atoms)
3470 blocker_atoms = [myatom for myatom in atoms \
3472 blocker_atoms.sort()
3473 counter = long(pkg.metadata["COUNTER"])
3474 blocker_cache[cpv] = \
3475 blocker_cache.BlockerData(counter, blocker_atoms)
3478 for atom in blocker_atoms:
3479 blocker = Blocker(atom=atom,
3480 eapi=pkg.metadata["EAPI"],
3481 priority=self._priority(runtime=True),
3483 self._dynamic_config._blocker_parents.add(blocker, pkg)
3484 except portage.exception.InvalidAtom as e:
3485 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3486 show_invalid_depstring_notice(
3487 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed, then persist.
3489 for cpv in stale_cache:
3490 del blocker_cache[cpv]
3491 blocker_cache.flush()
3494 # Discard any "uninstall" tasks scheduled by previous calls
3495 # to this method, since those tasks may not make sense given
3496 # the current graph state.
3497 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
3498 if previous_uninstall_tasks:
3499 self._dynamic_config._blocker_uninstalls = digraph()
3500 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each blocker against initial and final DBs.
3502 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
3503 self._spinner_update()
3504 root_config = self._frozen_config.roots[blocker.root]
3505 virtuals = root_config.settings.getvirtuals()
3506 myroot = blocker.root
3507 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
3508 final_db = self._dynamic_config.mydbapi[myroot]
3510 provider_virtual = False
3511 if blocker.cp in virtuals and \
3512 not self._have_new_virt(blocker.root, blocker.cp):
3513 provider_virtual = True
3515 # Use this to check PROVIDE for each matched package
3517 atom_set = InternalPackageSet(
3518 initial_atoms=[blocker.atom])
# Old-style virtuals expand into one atom per PROVIDE-er.
3520 if provider_virtual:
3522 for provider_entry in virtuals[blocker.cp]:
3523 atoms.append(Atom(blocker.atom.replace(
3524 blocker.cp, provider_entry.cp, 1)))
3526 atoms = [blocker.atom]
3528 blocked_initial = set()
3530 for pkg in initial_db.match_pkgs(atom):
3531 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3532 blocked_initial.add(pkg)
3534 blocked_final = set()
3536 for pkg in final_db.match_pkgs(atom):
3537 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
3538 blocked_final.add(pkg)
# A blocker that matches nothing anywhere is irrelevant -- discard it
# and any parents left with no remaining blockers.
3540 if not blocked_initial and not blocked_final:
3541 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
3542 self._dynamic_config._blocker_parents.remove(blocker)
3543 # Discard any parents that don't have any more blockers.
3544 for pkg in parent_pkgs:
3545 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
3546 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
3547 self._dynamic_config._blocker_parents.remove(pkg)
3549 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
3550 unresolved_blocks = False
3551 depends_on_order = set()
3552 for pkg in blocked_initial:
3553 if pkg.slot_atom == parent.slot_atom and \
3554 not blocker.atom.blocker.overlap.forbid:
3555 # New !!atom blockers do not allow temporary
3556 # simulaneous installation, so unlike !atom
3557 # blockers, !!atom blockers aren't ignored
3558 # when they match other packages occupying
3561 if parent.installed:
3562 # Two currently installed packages conflict with
3563 # eachother. Ignore this case since the damage
3564 # is already done and this would be likely to
3565 # confuse users if displayed like a normal blocker.
3568 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3570 if parent.operation == "merge":
3571 # Maybe the blocked package can be replaced or simply
3572 # unmerged to resolve this block.
3573 depends_on_order.add((pkg, parent))
3575 # None of the above blocker resolutions techniques apply,
3576 # so apparently this one is unresolvable.
3577 unresolved_blocks = True
3578 for pkg in blocked_final:
3579 if pkg.slot_atom == parent.slot_atom and \
3580 not blocker.atom.blocker.overlap.forbid:
3581 # New !!atom blockers do not allow temporary
3582 # simulaneous installation, so unlike !atom
3583 # blockers, !!atom blockers aren't ignored
3584 # when they match other packages occupying
3587 if parent.operation == "nomerge" and \
3588 pkg.operation == "nomerge":
3589 # This blocker will be handled the next time that a
3590 # merge of either package is triggered.
3593 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
3595 # Maybe the blocking package can be
3596 # unmerged to resolve this block.
3597 if parent.operation == "merge" and pkg.installed:
3598 depends_on_order.add((pkg, parent))
3600 elif parent.operation == "nomerge":
3601 depends_on_order.add((parent, pkg))
3603 # None of the above blocker resolutions techniques apply,
3604 # so apparently this one is unresolvable.
3605 unresolved_blocks = True
3607 # Make sure we don't unmerge any package that have been pulled
3609 if not unresolved_blocks and depends_on_order:
3610 for inst_pkg, inst_task in depends_on_order:
3611 if self._dynamic_config.digraph.contains(inst_pkg) and \
3612 self._dynamic_config.digraph.parent_nodes(inst_pkg):
3613 unresolved_blocks = True
3616 if not unresolved_blocks and depends_on_order:
3617 for inst_pkg, inst_task in depends_on_order:
# Build a synthetic "uninstall" task mirroring the installed package.
3618 uninst_task = Package(built=inst_pkg.built,
3619 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3620 metadata=inst_pkg.metadata,
3621 operation="uninstall",
3622 root_config=inst_pkg.root_config,
3623 type_name=inst_pkg.type_name)
3624 # Enforce correct merge order with a hard dep.
3625 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
3626 priority=BlockerDepPriority.instance)
3627 # Count references to this blocker so that it can be
3628 # invalidated after nodes referencing it have been
3630 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
3631 if not unresolved_blocks and not depends_on_order:
3632 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
3633 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
3634 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
3635 self._dynamic_config._blocker_parents.remove(blocker)
3636 if not self._dynamic_config._blocker_parents.child_nodes(parent):
3637 self._dynamic_config._blocker_parents.remove(parent)
3638 if unresolved_blocks:
3639 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
# Return whether blocker conflicts may be tolerated for this run: any of the
# listed "no actual merge happens" options makes conflicts acceptable.
# NOTE(review): embedded-numbering gaps show the accumulator assignment and
# the `return` statement are missing from this excerpt.
3643 def _accept_blocker_conflicts(self):
3645 for x in ("--buildpkgonly", "--fetchonly",
3646 "--fetch-all-uri", "--nodeps"):
3647 if x in self._frozen_config.myopts:
# Sort the graph's node order to bias merge scheduling: uninstalls last,
# deep system runtime deps promoted, then by descending parent-reference
# count.  NOTE(review): the comparator's `return -1`/`return 1` lines are
# missing from this excerpt (embedded-numbering gaps) -- the visible
# branches show only the comparison structure.
3652 def _merge_order_bias(self, mygraph):
3654 For optimal leaf node selection, promote deep system runtime deps and
3655 order nodes from highest to lowest overall reference count.
# node_info maps each node to its number of parents in the graph.
3659 for node in mygraph.order:
3660 node_info[node] = len(mygraph.parent_nodes(node))
3661 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
3663 def cmp_merge_preference(node1, node2):
3665 if node1.operation == 'uninstall':
3666 if node2.operation == 'uninstall':
3670 if node2.operation == 'uninstall':
3671 if node1.operation == 'uninstall':
3675 node1_sys = node1 in deep_system_deps
3676 node2_sys = node2 in deep_system_deps
3677 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending).
3682 return node_info[node2] - node_info[node1]
3684 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, (re)computing it via conflict resolution
# and task serialization until a stable result is cached.  The retry
# exception restarts the loop after backtracking adjustments.
# NOTE(review): the `try:` line, the cache-reset on retry, reversed-order
# handling, and the final `return` are missing from this excerpt.
3686 def altlist(self, reversed=False):
3688 while self._dynamic_config._serialized_tasks_cache is None:
3689 self._resolve_conflicts()
3691 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
3692 self._serialize_tasks()
3693 except self._serialize_tasks_retry:
# Work on a copy so callers cannot mutate the cached list.
3696 retlist = self._dynamic_config._serialized_tasks_cache[:]
# Produce the scheduler's view of the graph.  Destructive: break_refs()
# severs references back into this depgraph, so the instance must not be
# used for further calculations afterwards (see docstring).
# NOTE(review): embedded-numbering gaps indicate missing lines (the root
# loop header over trees, and the final return of the
# _scheduler_graph_config result).
3701 def schedulerGraph(self):
3703 The scheduler graph is identical to the normal one except that
3704 uninstall edges are reversed in specific cases that require
3705 conflicting packages to be temporarily installed simultaneously.
3706 This is intended for use by the Scheduler in it's parallelization
3707 logic. It ensures that temporary simultaneous installation of
3708 conflicting packages is avoided when appropriate (especially for
3709 !!atom blockers), but allowed in specific cases that require it.
3711 Note that this method calls break_refs() which alters the state of
3712 internal Package instances such that this depgraph instance should
3713 not be used to perform any more calculations.
3716 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
3717 mergelist = self.altlist()
3718 self.break_refs(mergelist)
3719 self.break_refs(self._dynamic_config._scheduler_graph.order)
3721 # Break DepPriority.satisfied attributes which reference
3722 # installed Package instances.
3723 for parents, children, node in \
3724 self._dynamic_config._scheduler_graph.nodes.values():
3725 for priorities in chain(parents.values(), children.values()):
3726 for priority in priorities:
3727 if priority.satisfied:
# Replace the (truthy) Package reference with the bare True flag so
# the priority no longer keeps the Package instance alive.
3728 priority.satisfied = True
# Prune the pkg cache down to packages the scheduler can still reach.
3730 pkg_cache = self._frozen_config._pkg_cache
3731 graph = self._dynamic_config._scheduler_graph
3732 trees = self._frozen_config.trees
3733 pruned_pkg_cache = {}
3734 for pkg in pkg_cache:
3735 if pkg in graph or \
3736 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
3737 pruned_pkg_cache[pkg] = pkg
3740 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
3741 self.break_refs(trees[root]['vartree'].dbapi)
3743 self.break_refs(pruned_pkg_cache)
3745 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
# Replace each node's FakeVartree-backed RootConfig with the original
# RootConfig so held Package references no longer pin the depgraph/
# FakeVartree in memory.
# NOTE(review): the `for node in nodes:` loop header appears to be missing
# from this excerpt (embedded-numbering gap before line 3757).
3749 def break_refs(self, nodes):
3751 Take a mergelist like that returned from self.altlist() and
3752 break any references that lead back to the depgraph. This is
3753 useful if you want to hold references to packages without
3754 also holding the depgraph on the heap.
3757 if hasattr(node, "root_config"):
3758 # The FakeVartree references the _package_cache which
3759 # references the depgraph. So that Package instances don't
3760 # hold the depgraph and FakeVartree on the heap, replace
3761 # the RootConfig that references the FakeVartree with the
3762 # original RootConfig instance which references the actual
3764 node.root_config = \
3765 self._frozen_config._trees_orig[node.root_config.root]["root_config"]
# Complete the graph and validate blockers; any failure at this stage is an
# internal error (these steps are expected to succeed once a graph exists).
# Slot conflicts, if recorded, are handed to the slot-conflict handler.
3767 def _resolve_conflicts(self):
3768 if not self._complete_graph():
3769 raise self._unknown_internal_error()
3771 if not self._validate_blockers():
3772 raise self._unknown_internal_error()
3774 if self._dynamic_config._slot_collision_info:
3775 self._process_slot_conflicts()
def _serialize_tasks(self):
    """Linearize the dependency digraph into an ordered merge list.

    Returns a tuple (retlist, scheduler_graph): retlist is the ordered
    list of merge/uninstall tasks (plus Blocker entries appended for
    display), and scheduler_graph is a digraph copy for the scheduler.

    Raises self._unknown_internal_error() when unresolvable blockers or
    circular dependencies prevent serialization, and
    self._serialize_tasks_retry when "complete" depgraph mode was just
    enabled and the calculation must be restarted.

    NOTE(review): several control-flow lines (loop headers, break /
    continue statements, else branches, try: lines) appear to be elided
    from this excerpt; the comments below describe only what the
    visible code demonstrates -- confirm against the complete file.
    """

    if "--debug" in self._frozen_config.myopts:
        writemsg("\ndigraph:\n\n", noiselevel=-1)
        self._dynamic_config.digraph.debug_print()
        writemsg("\n", noiselevel=-1)

    # The scheduler gets an unpruned copy of the graph.
    scheduler_graph = self._dynamic_config.digraph.copy()

    if '--nodeps' in self._frozen_config.myopts:
        # Preserve the package order given on the command line.
        return ([node for node in scheduler_graph \
            if isinstance(node, Package) \
            and node.operation == 'merge'], scheduler_graph)

    mygraph=self._dynamic_config.digraph.copy()
    # Prune "nomerge" root nodes if nothing depends on them, since
    # otherwise they slow down merge order calculation. Don't remove
    # non-root nodes since they help optimize merge order in some cases
    # such as revdep-rebuild.
    removed_nodes = set()
        # NOTE(review): the enclosing loop header is elided here.
        for node in mygraph.root_nodes():
            if not isinstance(node, Package) or \
                node.installed or node.onlydeps:
                removed_nodes.add(node)
            self._spinner_update()
        mygraph.difference_update(removed_nodes)
        if not removed_nodes:
        removed_nodes.clear()
    self._merge_order_bias(mygraph)

    def cmp_circular_bias(n1, n2):
        """
        RDEPEND is stronger than PDEPEND and this function
        measures such a strength bias within a circular
        dependency relationship.
        """
        n1_n2_medium = n2 in mygraph.child_nodes(n1,
            ignore_priority=priority_range.ignore_medium_soft)
        n2_n1_medium = n1 in mygraph.child_nodes(n2,
            ignore_priority=priority_range.ignore_medium_soft)
        # Equal bias in both directions means no ordering preference.
        if n1_n2_medium == n2_n1_medium:

    myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()

    # Contains uninstall tasks that have been scheduled to
    # occur after overlapping blockers have been installed.
    scheduled_uninstalls = set()
    # Contains any Uninstall tasks that have been ignored
    # in order to avoid the circular deps code path. These
    # correspond to blocker conflicts that could not be
    ignored_uninstall_tasks = set()
    have_uninstall_task = False
    complete = "complete" in self._dynamic_config.myparams

    def get_nodes(**kwargs):
        """
        Returns leaf nodes excluding Uninstall instances
        since those should be executed as late as possible.
        """
        return [node for node in mygraph.leaf_nodes(**kwargs) \
            if isinstance(node, Package) and \
                (node.operation != "uninstall" or \
                node in scheduled_uninstalls)]

    # sys-apps/portage needs special treatment if ROOT="/"
    running_root = self._frozen_config._running_root.root
    runtime_deps = InternalPackageSet(
        initial_atoms=[PORTAGE_PACKAGE_ATOM])
    running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
        PORTAGE_PACKAGE_ATOM)
    replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
        PORTAGE_PACKAGE_ATOM)

    # match_pkgs() returns a list; reduce each to a Package or None.
        running_portage = running_portage[0]
        running_portage = None

    if replacement_portage:
        replacement_portage = replacement_portage[0]
        replacement_portage = None

    if replacement_portage == running_portage:
        # Same package -- no portage update is pending.
        replacement_portage = None

    if replacement_portage is not None and \
        (running_portage is None or \
        running_portage.cpv != replacement_portage.cpv or \
        '9999' in replacement_portage.cpv or \
        'git' in replacement_portage.inherited):
        # update from running_portage to replacement_portage asap
        asap_nodes.append(replacement_portage)

    if running_portage is not None:
            portage_rdepend = self._select_atoms_highest_available(
                running_root, running_portage.metadata["RDEPEND"],
                myuse=self._pkg_use_enabled(running_portage),
                parent=running_portage, strict=False)
        except portage.exception.InvalidDependString as e:
            portage.writemsg("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (running_root, running_portage.cpv, e), noiselevel=-1)
            # Fall back to an empty atom list for portage itself.
            portage_rdepend = {running_portage : []}
        for atoms in portage_rdepend.values():
            runtime_deps.update(atom for atom in atoms \
                if not atom.blocker)

    # Merge libc asap, in order to account for implicit
    # dependencies. See bug #303567.
    for root in (running_root,):
        libc_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
            portage.const.LIBC_PACKAGE_ATOM)
            libc_pkg = libc_pkg[0]
            if libc_pkg.operation == 'merge':
                # Only add a dep when the version changes.
                if not libc_pkg.root_config.trees[
                    'vartree'].dbapi.cpv_exists(libc_pkg.cpv):

                    # If there's also an os-headers upgrade, we need to
                    # pull that in first. See bug #328317.
                    os_headers_pkg = self._dynamic_config.mydbapi[root].match_pkgs(
                        portage.const.OS_HEADERS_PACKAGE_ATOM)
                        os_headers_pkg = os_headers_pkg[0]
                        if os_headers_pkg.operation == 'merge':
                            # Only add a dep when the version changes.
                            if not os_headers_pkg.root_config.trees[
                                'vartree'].dbapi.cpv_exists(os_headers_pkg.cpv):
                                asap_nodes.append(os_headers_pkg)

                    asap_nodes.append(libc_pkg)

    def gather_deps(ignore_priority, mergeable_nodes,
        selected_nodes, node):
        """
        Recursively gather a group of nodes that RDEPEND on
        eachother. This ensures that they are merged as a group
        and get their RDEPENDs satisfied as soon as possible.
        """
        if node in selected_nodes:
        if node not in mergeable_nodes:
        if node == replacement_portage and \
            mygraph.child_nodes(node,
                ignore_priority=priority_range.ignore_medium_soft):
            # Make sure that portage always has all of it's
            # RDEPENDs installed first.
        selected_nodes.add(node)
        for child in mygraph.child_nodes(node,
            ignore_priority=ignore_priority):
            if not gather_deps(ignore_priority,
                mergeable_nodes, selected_nodes, child):

    def ignore_uninst_or_med(priority):
        # Blocker priorities are handled specially before delegating.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium(priority)

    def ignore_uninst_or_med_soft(priority):
        # Blocker priorities are handled specially before delegating.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium_soft(priority)

    tree_mode = "--tree" in self._frozen_config.myopts
    # Tracks whether or not the current iteration should prefer asap_nodes
    # if available. This is set to False when the previous iteration
    # failed to select any nodes. It is reset whenever nodes are
    # successfully selected.

    # Controls whether or not the current iteration should drop edges that
    # are "satisfied" by installed packages, in order to solve circular
    # dependencies. The deep runtime dependencies of installed packages are
    # not checked in this case (bug #199856), so it must be avoided
    # whenever possible.
    drop_satisfied = False

    # State of variables for successive iterations that loosen the
    # criteria for node selection.
    #
    # iteration prefer_asap drop_satisfied

    # If no nodes are selected on the last iteration, it is due to
    # unresolved blockers or circular dependencies.

    while not mygraph.empty():
        self._spinner_update()
        selected_nodes = None
        ignore_priority = None
        if drop_satisfied or (prefer_asap and asap_nodes):
            priority_range = DepPrioritySatisfiedRange
            priority_range = DepPriorityNormalRange
        if prefer_asap and asap_nodes:
            # ASAP nodes are merged before their soft deps. Go ahead and
            # select root nodes here if necessary, since it's typical for
            # the parent to have been removed from the graph already.
            asap_nodes = [node for node in asap_nodes \
                if mygraph.contains(node)]
            for node in asap_nodes:
                if not mygraph.child_nodes(node,
                    ignore_priority=priority_range.ignore_soft):
                    selected_nodes = [node]
                    asap_nodes.remove(node)

        if not selected_nodes and \
            not (prefer_asap and asap_nodes):
            for i in range(priority_range.NONE,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                nodes = get_nodes(ignore_priority=ignore_priority)
                    # If there is a mixture of merges and uninstalls,
                    # do the uninstalls first.
                        good_uninstalls = []
                            if node.operation == "uninstall":
                                good_uninstalls.append(node)

                            nodes = good_uninstalls

                    if ignore_priority is None and not tree_mode:
                        # Greedily pop all of these nodes since no
                        # relationship has been ignored. This optimization
                        # destroys --tree output, so it's disabled in tree
                        selected_nodes = nodes
                        # For optimal merge order:
                        # * Only pop one node.
                        # * Removing a root node (node without a parent)
                        #   will not produce a leaf node, so avoid it.
                        # * It's normal for a selected uninstall to be a
                        #   root node, so don't check them for parents.
                            if node.operation == "uninstall" or \
                                mygraph.parent_nodes(node):
                                selected_nodes = [node]

        if not selected_nodes:
            nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
                mergeable_nodes = set(nodes)
                if prefer_asap and asap_nodes:
                for i in range(priority_range.SOFT,
                    priority_range.MEDIUM_SOFT + 1):
                    ignore_priority = priority_range.ignore_priority[i]
                        if not mygraph.parent_nodes(node):
                        selected_nodes = set()
                        if gather_deps(ignore_priority,
                            mergeable_nodes, selected_nodes, node):
                            selected_nodes = None

        if prefer_asap and asap_nodes and not selected_nodes:
            # We failed to find any asap nodes to merge, so ignore
            # them for the next iteration.

        if selected_nodes and ignore_priority is not None:
            # Try to merge ignored medium_soft deps as soon as possible
            # if they're not satisfied by installed packages.
            for node in selected_nodes:
                children = set(mygraph.child_nodes(node))
                soft = children.difference(
                    mygraph.child_nodes(node,
                        ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
                medium_soft = children.difference(
                    mygraph.child_nodes(node,
                        DepPrioritySatisfiedRange.ignore_medium_soft))
                medium_soft.difference_update(soft)
                for child in medium_soft:
                    if child in selected_nodes:
                    if child in asap_nodes:
                    asap_nodes.append(child)

        if selected_nodes and len(selected_nodes) > 1:
            if not isinstance(selected_nodes, list):
                selected_nodes = list(selected_nodes)
            # Order circular groups by RDEPEND-vs-PDEPEND strength bias.
            selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # An Uninstall task needs to be executed in order to
            # avoid conflict if possible.

                priority_range = DepPrioritySatisfiedRange
                priority_range = DepPriorityNormalRange

            mergeable_nodes = get_nodes(
                ignore_priority=ignore_uninst_or_med)

            min_parent_deps = None
            # FIXME: This loop can be extremely slow when
            # there of lots of blockers to solve
            # (especially the gather_deps part).
            for task in myblocker_uninstalls.leaf_nodes():
                # Do some sanity checks so that system or world packages
                # don't get uninstalled inappropriately here (only really
                # necessary when --complete-graph has not been enabled).

                if task in ignored_uninstall_tasks:

                if task in scheduled_uninstalls:
                    # It's been scheduled but it hasn't
                    # been executed yet due to dependence
                    # on installation of blocking packages.

                root_config = self._frozen_config.roots[task.root]
                inst_pkg = self._pkg(task.cpv, "installed", root_config,

                if self._dynamic_config.digraph.contains(inst_pkg):

                forbid_overlap = False
                heuristic_overlap = False
                for blocker in myblocker_uninstalls.parent_nodes(task):
                    if not eapi_has_strong_blocks(blocker.eapi):
                        heuristic_overlap = True
                    elif blocker.atom.blocker.overlap.forbid:
                        forbid_overlap = True
                if forbid_overlap and running_root == task.root:

                if heuristic_overlap and running_root == task.root:
                    # Never uninstall sys-apps/portage or it's essential
                    # dependencies, except through replacement.
                        runtime_dep_atoms = \
                            list(runtime_deps.iterAtomsForPackage(task))
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                    # Don't uninstall a runtime dep if it appears
                    # to be the only suitable one installed.
                    vardb = root_config.trees["vartree"].dbapi
                    for atom in runtime_dep_atoms:
                        other_version = None
                        for pkg in vardb.match_pkgs(atom):
                            # Skip the task package itself (same cpv and
                            # COUNTER means the very same installed pkg).
                            if pkg.cpv == task.cpv and \
                                pkg.metadata["COUNTER"] == \
                                task.metadata["COUNTER"]:
                        if other_version is None:

                    # For packages in the system set, don't take
                    # any chances. If the conflict can't be resolved
                    # by a normal replacement operation then abort.
                        for atom in root_config.sets[
                            "system"].iterAtomsForPackage(task):
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                # Note that the world check isn't always
                # necessary since self._complete_graph() will
                # add all packages from the system and world sets to the
                # graph. This just allows unresolved conflicts to be
                # detected as early as possible, which makes it possible
                # to avoid calling self._complete_graph() when it is
                # unnecessary due to blockers triggering an abortion.
                    # For packages in the world set, go ahead an uninstall
                    # when necessary, as long as the atom will be satisfied
                    # in the final state.
                    graph_db = self._dynamic_config.mydbapi[task.root]
                        for atom in root_config.sets[
                            "selected"].iterAtomsForPackage(task):
                            for pkg in graph_db.match_pkgs(atom):
                                # Record why this pkg is blocked from
                                # uninstall, for later display.
                                self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
                    except portage.exception.InvalidDependString as e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                # Check the deps of parent nodes to ensure that
                # the chosen task produces a leaf node. Maybe
                # this can be optimized some more to make the
                # best possible choice, but the current algorithm
                # is simple and should be near optimal for most
                self._spinner_update()
                mergeable_parent = False
                for parent in mygraph.parent_nodes(task):
                    parent_deps.update(mygraph.child_nodes(parent,
                        ignore_priority=priority_range.ignore_medium_soft))
                    if parent in mergeable_nodes and \
                        gather_deps(ignore_uninst_or_med_soft,
                        mergeable_nodes, set(), parent):
                        mergeable_parent = True
                if not mergeable_parent:

                parent_deps.remove(task)
                # Prefer the uninstall whose parents have the fewest deps.
                if min_parent_deps is None or \
                    len(parent_deps) < min_parent_deps:
                    min_parent_deps = len(parent_deps)

            if uninst_task is not None:
                # The uninstall is performed only after blocking
                # packages have been merged on top of it. File
                # collisions between blocking packages are detected
                # and removed from the list of files to be uninstalled.
                scheduled_uninstalls.add(uninst_task)
                parent_nodes = mygraph.parent_nodes(uninst_task)

                # Reverse the parent -> uninstall edges since we want
                # to do the uninstall after blocking packages have
                # been merged on top of it.
                mygraph.remove(uninst_task)
                for blocked_pkg in parent_nodes:
                    mygraph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)
                    scheduler_graph.remove_edge(uninst_task, blocked_pkg)
                    scheduler_graph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)

                # Sometimes a merge node will render an uninstall
                # node unnecessary (due to occupying the same SLOT),
                # and we want to avoid executing a separate uninstall
                # task in that case.
                slot_node = self._dynamic_config.mydbapi[uninst_task.root
                    ].match_pkgs(uninst_task.slot_atom)
                    slot_node[0].operation == "merge":
                    mygraph.add(slot_node[0], uninst_task,
                        priority=BlockerDepPriority.instance)

                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            # Only select root nodes as a last resort. This case should
            # only trigger when the graph is nearly empty and the only
            # remaining nodes are isolated (no parents or children). Since
            # the nodes must be isolated, ignore_priority is not needed.
            selected_nodes = get_nodes()

        if not selected_nodes and not drop_satisfied:
            drop_satisfied = True

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # If possible, drop an uninstall task here in order to avoid
            # the circular deps code path. The corresponding blocker will
            # still be counted as an unresolved conflict.
            for node in myblocker_uninstalls.leaf_nodes():
                    mygraph.remove(node)
                    ignored_uninstall_tasks.add(node)

            if uninst_task is not None:
                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            # Nothing selectable remains: report a circular dependency.
            self._dynamic_config._circular_deps_for_display = mygraph
            raise self._unknown_internal_error()

        # At this point, we've succeeded in selecting one or more nodes, so
        # reset state variables for leaf node selection.
        drop_satisfied = False

        mygraph.difference_update(selected_nodes)

        for node in selected_nodes:
            if isinstance(node, Package) and \
                node.operation == "nomerge":

            # Handle interactions between blockers
            # and uninstallation tasks.
            solved_blockers = set()
            if isinstance(node, Package) and \
                "uninstall" == node.operation:
                have_uninstall_task = True
                vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
                previous_cpv = vardb.match(node.slot_atom)
                    # The package will be replaced by this one, so remove
                    # the corresponding Uninstall task if necessary.
                    previous_cpv = previous_cpv[0]
                        ("installed", node.root, previous_cpv, "uninstall")
                        mygraph.remove(uninst_task)

            if uninst_task is not None and \
                uninst_task not in ignored_uninstall_tasks and \
                myblocker_uninstalls.contains(uninst_task):
                blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
                myblocker_uninstalls.remove(uninst_task)
                # Discard any blockers that this Uninstall solves.
                for blocker in blocker_nodes:
                    if not myblocker_uninstalls.child_nodes(blocker):
                        myblocker_uninstalls.remove(blocker)
                            self._dynamic_config._unsolvable_blockers:
                            solved_blockers.add(blocker)

            retlist.append(node)

            if (isinstance(node, Package) and \
                "uninstall" == node.operation) or \
                (uninst_task is not None and \
                uninst_task in scheduled_uninstalls):
                # Include satisfied blockers in the merge list
                # since the user might be interested and also
                # it serves as an indicator that blocking packages
                # will be temporarily installed simultaneously.
                for blocker in solved_blockers:
                    retlist.append(blocker)

    unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
    for node in myblocker_uninstalls.root_nodes():
        unsolvable_blockers.add(node)

    # If any Uninstall tasks need to be executed in order
    # to avoid a conflict, complete the graph with any
    # dependencies that may have been initially
    # neglected (to ensure that unsafe Uninstall tasks
    # are properly identified and blocked from execution).
    if have_uninstall_task and \
        not unsolvable_blockers:
        self._dynamic_config.myparams["complete"] = True
        if '--debug' in self._frozen_config.myopts:
            msg.append("enabling 'complete' depgraph mode " + \
                "due to uninstall task(s):")
            for node in retlist:
                if isinstance(node, Package) and \
                    node.operation == 'uninstall':
                    msg.append("\t%s" % (node,))
            writemsg_level("\n%s\n" % \
                "".join("%s\n" % line for line in msg),
                level=logging.DEBUG, noiselevel=-1)
        raise self._serialize_tasks_retry("")

    # Set satisfied state on blockers, but not before the
    # above retry path, since we don't want to modify the
    # state in that case.
    for node in retlist:
        if isinstance(node, Blocker):
            node.satisfied = True

    for blocker in unsolvable_blockers:
        retlist.append(blocker)

    if unsolvable_blockers and \
        not self._accept_blocker_conflicts():
        # Stash state for display_problems() before aborting.
        self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    if self._dynamic_config._slot_collision_info and \
        not self._accept_blocker_conflicts():
        self._dynamic_config._serialized_tasks_cache = retlist[:]
        self._dynamic_config._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """Display a circular-dependency error with suggested fixes.

    Instantiates a circular_dependency_handler for *mygraph*, forces a
    verbose --tree display of the cycle's merge list, then prints the
    handler's message and any USE-flag change suggestions.

    NOTE(review): some lines (e.g. an "if suggestions:" guard and else
    branches) appear to be elided from this excerpt.
    """
    self._dynamic_config._circular_dependency_handler = \
        circular_dependency_handler(self, mygraph)
    handler = self._dynamic_config._circular_dependency_handler

    # Force verbose tree output so the cycle is visible in context.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts["--verbose"] = True
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(handler.merge_list)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is None or \
        "--debug" in self._frozen_config.myopts:
        handler.debug_print()
        portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is not None:
        portage.writemsg(handler.circular_dep_message, noiselevel=-1)

    suggestions = handler.suggestions
        # Offer concrete changes that would break the cycle.
        writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
        if len(suggestions) == 1:
            writemsg("by applying the following change:\n", noiselevel=-1)
            writemsg("by applying " + colorize("bold", "any of") + \
                " the following changes:\n", noiselevel=-1)
        writemsg("".join(suggestions), noiselevel=-1)
        writemsg("\nNote that this change can be reverted, once the package has" + \
            " been installed.\n", noiselevel=-1)
        if handler.large_cycle_count:
            writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
                "Several changes might be required to resolve all cycles.\n" + \
                "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)

        # Generic fallback advice when no specific suggestion exists.
        writemsg("\n\n", noiselevel=-1)
        writemsg(prefix + "Note that circular dependencies " + \
            "can often be avoided by temporarily\n", noiselevel=-1)
        writemsg(prefix + "disabling USE flags that trigger " + \
            "optional dependencies.\n", noiselevel=-1)
4487 def _show_merge_list(self):
4488 if self._dynamic_config._serialized_tasks_cache is not None and \
4489 not (self._dynamic_config._displayed_list and \
4490 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
4491 self._dynamic_config._displayed_list == \
4492 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
4493 display_list = self._dynamic_config._serialized_tasks_cache[:]
4494 if "--tree" in self._frozen_config.myopts:
4495 display_list.reverse()
4496 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """Explain blocker conflicts that could not be resolved.

    Prints (to stderr) each conflicting package together with up to a
    few of the parents that pulled it in, then points the user at the
    blocker documentation unless --quiet is in effect.

    NOTE(review): several guard lines (e.g. "if conflict_pkgs:",
    initializers like the pruned set and max_parents, break statements
    and else branches) appear to be elided from this excerpt.
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in textwrap.wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    # Fall back to the world-set atom that kept this
                    # installed package from being uninstalled.
                    parent_atoms = set([("@selected", atom)])
                conflict_pkgs[pkg] = parent_atoms

        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        for pkg, parent_atoms in conflict_pkgs.items():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

        # Max number of parents shown, to avoid flooding the display.
        for pkg, parent_atoms in conflict_pkgs.items():

            # Prefer packages that are not directly involved in a conflict.
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                parent, atom = parent_atom
                if parent not in conflict_pkgs:
                    pruned_list.add(parent_atom)

            # Fill any remaining display slots with the rest.
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                pruned_list.add(parent_atom)

            omitted_parents = len(parent_atoms) - len(pruned_list)
            msg.append(indent + "%s pulled in by\n" % pkg)

            for parent_atom in pruned_list:
                parent, atom = parent_atom
                msg.append(2*indent)
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                    # Display the specific atom from SetArg or
                    msg.append("%s required by %s" % (atom, parent))

                # Indicate how many additional parents were omitted.
                msg.append(2*indent)
                msg.append("(and %d more)\n" % omitted_parents)

    sys.stderr.write("".join(msg))

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
    """Render the merge list via the module-level display() function.

    Parameters:
        mylist    -- the merge list (tasks/blockers) to render
        favorites -- optional list of favorite atoms; defaults to an
                     empty list (previously this was a mutable default
                     argument, a well-known Python pitfall, replaced
                     here with a None sentinel for safety)
        verbosity -- optional verbosity override, passed through

    Returns whatever the module-level display() returns.
    """
    if favorites is None:
        favorites = []

    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist

    # The name 'display' here resolves in the module namespace (the
    # imported display function), not to this method -- no recursion.
    return display(self, mylist, favorites, verbosity)
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """

    # Note that show_masked_packages() sends it's output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    # NOTE(review): the save and restore of the original sys.stdout
    # (presumably via try/finally) appears to be elided from this
    # excerpt -- confirm against the complete file.
    sys.stdout = sys.stderr
    self._display_problems()

    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
4637 def _display_problems(self):
4638 if self._dynamic_config._circular_deps_for_display is not None:
4639 self._show_circular_deps(
4640 self._dynamic_config._circular_deps_for_display)
4642 # The user is only notified of a slot conflict if
4643 # there are no unresolvable blocker conflicts.
4644 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
4645 self._show_unsatisfied_blockers(
4646 self._dynamic_config._unsatisfied_blockers_for_display)
4647 elif self._dynamic_config._slot_collision_info:
4648 self._show_slot_collision_notice()
4650 self._show_missed_update()
4652 def get_dep_chain(pkg):
4653 traversed_nodes = set()
4658 all_parents = self._dynamic_config._parent_atoms
4659 while node is not None:
4660 traversed_nodes.add(node)
4661 if isinstance(node, DependencyArg):
4666 msg += _unicode_decode('required by %s') % (node,)
4667 elif node is not pkg:
4668 for ppkg, patom in all_parents[child]:
4670 atom = patom.unevaluated_atom
4674 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
4675 if priority.buildtime:
4676 dep_strings.add(node.metadata["DEPEND"])
4677 if priority.runtime:
4678 dep_strings.add(node.metadata["RDEPEND"])
4679 if priority.runtime_post:
4680 dep_strings.add(node.metadata["PDEPEND"])
4682 affecting_use = set()
4683 for dep_str in dep_strings:
4684 affecting_use.update(extract_affecting_use(dep_str, atom))
4686 #Don't show flags as 'affecting' if the user can't change them,
4687 pkgsettings = self._frozen_config.pkgsettings[node.root]
4688 pkgsettings.setcpv(node)
4689 affecting_use.difference_update(pkgsettings.usemask, \
4690 pkgsettings.useforce)
4695 for flag in affecting_use:
4696 if flag in self._pkg_use_enabled(node):
4699 usedep.append("-"+flag)
4700 pkg_name += "[%s]" % ",".join(usedep)
4706 msg += 'required by =%s' % pkg_name
4708 if node not in self._dynamic_config.digraph:
4709 # The parent is not in the graph due to backtracking.
4712 # When traversing to parents, prefer arguments over packages
4713 # since arguments are root nodes. Never traverse the same
4714 # package twice, in order to prevent an infinite loop.
4715 selected_parent = None
4716 for parent in self._dynamic_config.digraph.parent_nodes(node):
4717 if parent in traversed_nodes:
4719 if isinstance(parent, DependencyArg):
4720 if self._dynamic_config.digraph.parent_nodes(parent):
4721 selected_parent = parent
4728 msg += _unicode_decode(
4729 'required by %s (argument)') % (parent,)
4730 selected_parent = None
4733 selected_parent = parent
4735 node = selected_parent
4739 unstable_keyword_msg = []
4740 for pkg in self._dynamic_config._needed_unstable_keywords:
4741 self._show_merge_list()
4742 if pkg in self._dynamic_config.digraph:
4743 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4744 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
4745 use=self._pkg_use_enabled(pkg))
4746 for reason in mreasons:
4747 if reason.unmask_hint and \
4748 reason.unmask_hint.key == 'unstable keyword':
4749 keyword = reason.unmask_hint.value
4751 unstable_keyword_msg.append(get_dep_chain(pkg))
4752 unstable_keyword_msg.append("=%s %s\n" % (pkg.cpv, keyword))
4754 use_changes_msg = []
4755 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
4756 self._show_merge_list()
4757 if pkg in self._dynamic_config.digraph:
4758 changes = needed_use_config_change[1]
4760 for flag, state in changes.items():
4762 adjustments.append(flag)
4764 adjustments.append("-" + flag)
4765 use_changes_msg.append(get_dep_chain(pkg))
4766 use_changes_msg.append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
4769 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
4770 self._show_merge_list()
4771 if pkg in self._dynamic_config.digraph:
4772 license_msg.append(get_dep_chain(pkg))
4773 license_msg.append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
4775 if unstable_keyword_msg:
4776 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
4777 " are necessary to proceed:\n", noiselevel=-1)
4778 writemsg_stdout("".join(unstable_keyword_msg), noiselevel=-1)
4781 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
4782 " are necessary to proceed:\n", noiselevel=-1)
4783 writemsg_stdout("".join(use_changes_msg), noiselevel=-1)
4786 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
4787 " are necessary to proceed:\n", noiselevel=-1)
4788 writemsg_stdout("".join(license_msg), noiselevel=-1)
4790 # TODO: Add generic support for "set problem" handlers so that
4791 # the below warnings aren't special cases for world only.
4793 if self._dynamic_config._missing_args:
4794 world_problems = False
4795 if "world" in self._dynamic_config.sets[
4796 self._frozen_config.target_root].sets:
4797 # Filter out indirect members of world (from nested sets)
4798 # since only direct members of world are desired here.
4799 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
4800 for arg, atom in self._dynamic_config._missing_args:
4801 if arg.name in ("selected", "world") and atom in world_set:
4802 world_problems = True
4806 sys.stderr.write("\n!!! Problems have been " + \
4807 "detected with your world file\n")
4808 sys.stderr.write("!!! Please run " + \
4809 green("emaint --check world")+"\n\n")
4811 if self._dynamic_config._missing_args:
4812 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4813 " Ebuilds for the following packages are either all\n")
4814 sys.stderr.write(colorize("BAD", "!!!") + \
4815 " masked or don't exist:\n")
4816 sys.stderr.write(" ".join(str(atom) for arg, atom in \
4817 self._dynamic_config._missing_args) + "\n")
4819 if self._dynamic_config._pprovided_args:
4821 for arg, atom in self._dynamic_config._pprovided_args:
4822 if isinstance(arg, SetArg):
4824 arg_atom = (atom, atom)
4827 arg_atom = (arg.arg, atom)
4828 refs = arg_refs.setdefault(arg_atom, [])
4829 if parent not in refs:
4832 msg.append(bad("\nWARNING: "))
4833 if len(self._dynamic_config._pprovided_args) > 1:
4834 msg.append("Requested packages will not be " + \
4835 "merged because they are listed in\n")
4837 msg.append("A requested package will not be " + \
4838 "merged because it is listed in\n")
4839 msg.append("package.provided:\n\n")
4840 problems_sets = set()
4841 for (arg, atom), refs in arg_refs.items():
4844 problems_sets.update(refs)
4846 ref_string = ", ".join(["'%s'" % name for name in refs])
4847 ref_string = " pulled in by " + ref_string
4848 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
4850 if "selected" in problems_sets or "world" in problems_sets:
4851 msg.append("This problem can be solved in one of the following ways:\n\n")
4852 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
4853 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
4854 msg.append(" C) Remove offending entries from package.provided.\n\n")
4855 msg.append("The best course of action depends on the reason that an offending\n")
4856 msg.append("package.provided entry exists.\n\n")
4857 sys.stderr.write("".join(msg))
4859 masked_packages = []
4860 for pkg in self._dynamic_config._masked_license_updates:
4861 root_config = pkg.root_config
4862 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4863 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
4864 masked_packages.append((root_config, pkgsettings,
4865 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
4867 writemsg("\n" + colorize("BAD", "!!!") + \
4868 " The following updates are masked by LICENSE changes:\n",
4870 show_masked_packages(masked_packages)
4872 writemsg("\n", noiselevel=-1)
4874 masked_packages = []
4875 for pkg in self._dynamic_config._masked_installed:
4876 root_config = pkg.root_config
4877 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4878 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
4879 masked_packages.append((root_config, pkgsettings,
4880 pkg.cpv, "installed", pkg.metadata, mreasons))
4882 writemsg("\n" + colorize("BAD", "!!!") + \
4883 " The following installed packages are masked:\n",
4885 show_masked_packages(masked_packages)
4887 writemsg("\n", noiselevel=-1)
# Depgraph method: record requested packages that need no merge ("nomerge")
# into the world ("selected") file, plus any explicitly requested sets.
# NOTE(review): this dump is line-sampled (embedded numbering has gaps, e.g.
# 4895, 4898, 4901-4903 are absent), so some control flow — early returns,
# the try/finally around the world-set lock — is not visible here; confirm
# against the upstream file before relying on exact flow.
4889 def saveNomergeFavorites(self):
4890 """Find atoms in favorites that are not in the mergelist and add them
4891 to the world file if necessary."""
# Modes that must not modify the world file bail out here
# (gap: the return at line 4895 is not shown in this dump).
4892 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
4893 "--oneshot", "--onlydeps", "--pretend"):
4894 if x in self._frozen_config.myopts:
4896 root_config = self._frozen_config.roots[self._frozen_config.target_root]
4897 world_set = root_config.sets["selected"]
# Lock the on-disk world file while updating, if the set supports locking.
4899 world_locked = False
4900 if hasattr(world_set, "lock"):
# Re-read the set before updating so concurrent edits are not clobbered.
4904 if hasattr(world_set, "load"):
4905 world_set.load() # maybe it's changed on disk
4907 args_set = self._dynamic_config.sets[
4908 self._frozen_config.target_root].sets['__non_set_args__']
4909 portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
4910 added_favorites = set()
# Collect world atoms for requested packages that are already satisfied.
4911 for x in self._dynamic_config._set_nodes:
4912 pkg_type, root, pkg_key, pkg_status, pkg_repo = x
4913 if pkg_status != "nomerge":
4917 myfavkey = create_world_atom(x, args_set, root_config)
4919 if myfavkey in added_favorites:
4921 added_favorites.add(myfavkey)
# Invalid PROVIDE metadata is reported but does not abort the update.
4922 except portage.exception.InvalidDependString as e:
4923 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
4924 (pkg_key, str(e)), noiselevel=-1)
4925 writemsg("!!! see '%s'\n\n" % os.path.join(
4926 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Record requested sets (as @set entries) except world/selected themselves
# and sets that declare they are not world candidates.
4929 for arg in self._dynamic_config._initial_arg_list:
4930 if not isinstance(arg, SetArg):
4933 if k in ("selected", "world") or \
4934 not root_config.sets[k].world_candidate:
4939 all_added.append(SETPREFIX + k)
4940 all_added.extend(added_favorites)
4943 writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
4944 colorize("INFORM", str(a)), noiselevel=-1)
4946 world_set.update(all_added)
# Depgraph method: rebuild the dependency graph from a saved resume list
# (mtimedb["resume"]) and validate it, raising PackageNotFound or
# UnsatisfiedResumeDep as needed.
# NOTE(review): line-sampled dump — gaps in the embedded numbering mean some
# returns/else branches/try statements are not visible; verify control flow
# against the upstream file.
4951 def _loadResumeCommand(self, resume_data, skip_masked=True,
4954 Add a resume command to the graph and validate it in the process. This
4955 will raise a PackageNotFound exception if a package is not available.
# Defensive validation of the (externally persisted) resume data shape.
4960 if not isinstance(resume_data, dict):
4963 mergelist = resume_data.get("mergelist")
4964 if not isinstance(mergelist, list):
4967 fakedb = self._dynamic_config.mydbapi
4968 trees = self._frozen_config.trees
4969 serialized_tasks = []
# Each mergelist entry is a 5-tuple-like list:
# (pkg_type, root, cpv, action, repo); anything else is skipped.
4972 if not (isinstance(x, list) and len(x) == 5):
4974 pkg_type, myroot, pkg_key, action, pkg_repo = x
4975 if pkg_type not in self.pkg_tree_map:
4977 if action != "merge":
4979 root_config = self._frozen_config.roots[myroot]
4981 pkg = self._pkg(pkg_key, pkg_type, root_config, myrepo=pkg_repo)
4982 except portage.exception.PackageNotFound:
4983 # It does no exist or it is corrupt.
4985 # TODO: log these somewhere
# Skip packages the user has excluded (--exclude) since the resume
# list was saved.
4989 if "merge" == pkg.operation and \
4990 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
4991 modified_use=self._pkg_use_enabled(pkg)):
# Packages masked since the list was saved: either drop them as
# masked tasks or queue them for an unsatisfied-dep display.
4994 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
4996 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
4998 self._dynamic_config._unsatisfied_deps_for_display.append(
4999 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
5001 fakedb[myroot].cpv_inject(pkg)
5002 serialized_tasks.append(pkg)
5003 self._spinner_update()
5005 if self._dynamic_config._unsatisfied_deps_for_display:
# With --nodeps (or nothing to do) the saved order is trusted as-is.
5008 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
5009 self._dynamic_config._serialized_tasks_cache = serialized_tasks
5010 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
5012 self._select_package = self._select_pkg_from_graph
5013 self._dynamic_config.myparams["selective"] = True
5014 # Always traverse deep dependencies in order to account for
5015 # potentially unsatisfied dependencies of installed packages.
5016 # This is necessary for correct --keep-going or --resume operation
5017 # in case a package from a group of circularly dependent packages
5018 # fails. In this case, a package which has recently been installed
5019 # may have an unsatisfied circular dependency (pulled in by
5020 # PDEPEND, for example). So, even though a package is already
5021 # installed, it may not have all of it's dependencies satisfied, so
5022 # it may not be usable. If such a package is in the subgraph of
5023 # deep depenedencies of a scheduled build, that build needs to
5024 # be cancelled. In order for this type of situation to be
5025 # recognized, deep traversal of dependencies is required.
5026 self._dynamic_config.myparams["deep"] = True
5028 favorites = resume_data.get("favorites")
5029 args_set = self._dynamic_config.sets[
5030 self._frozen_config.target_root].sets['__non_set_args__']
5031 if isinstance(favorites, list):
5032 args = self._load_favorites(favorites)
5036 for task in serialized_tasks:
5037 if isinstance(task, Package) and \
5038 task.operation == "merge":
5039 if not self._add_pkg(task, None):
5042 # Packages for argument atoms need to be explicitly
5043 # added via _add_pkg() so that they are included in the
5044 # digraph (needed at least for --tree display).
5045 for arg in self._expand_set_args(args, add_to_digraph=True):
5046 for atom in arg.pset.getAtoms():
5047 pkg, existing_node = self._select_package(
5048 arg.root_config.root, atom)
5049 if existing_node is None and \
5051 if not self._add_pkg(pkg, Dependency(atom=atom,
5052 root=pkg.root, parent=arg)):
5055 # Allow unsatisfied deps here to avoid showing a masking
5056 # message for an unsatisfied dep that isn't necessarily
5058 if not self._create_graph(allow_unsatisfied=True):
# Unsatisfied deps whose parent is scheduled for merge always count.
5061 unsatisfied_deps = []
5062 for dep in self._dynamic_config._unsatisfied_deps:
5063 if not isinstance(dep.parent, Package):
5065 if dep.parent.operation == "merge":
5066 unsatisfied_deps.append(dep)
5069 # For unsatisfied deps of installed packages, only account for
5070 # them if they are in the subgraph of dependencies of a package
5071 # which is scheduled to be installed.
5072 unsatisfied_install = False
# Walk ancestors of the unsatisfied parent, looking for a "merge" node
# (gap: traversed-set initialization is not shown in this dump).
5074 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
5076 node = dep_stack.pop()
5077 if not isinstance(node, Package):
5079 if node.operation == "merge":
5080 unsatisfied_install = True
5082 if node in traversed:
5085 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
5087 if unsatisfied_install:
5088 unsatisfied_deps.append(dep)
5090 if masked_tasks or unsatisfied_deps:
5091 # This probably means that a required package
5092 # was dropped via --skipfirst. It makes the
5093 # resume list invalid, so convert it to a
5094 # UnsatisfiedResumeDep exception.
5095 raise self.UnsatisfiedResumeDep(self,
5096 masked_tasks + unsatisfied_deps)
5097 self._dynamic_config._serialized_tasks_cache = None
5100 except self._unknown_internal_error:
# Depgraph method: convert saved favorite strings back into DependencyArg
# instances (SetArg for "@set" entries, AtomArg otherwise).
# NOTE(review): line-sampled dump — the trailing `return args` and several
# branch lines are not visible here.
5105 def _load_favorites(self, favorites):
5107 Use a list of favorites to resume state from a
5108 previous select_files() call. This creates similar
5109 DependencyArg instances to those that would have
5110 been created by the original select_files() call.
5111 This allows Package instances to be matched with
5112 DependencyArg instances during graph creation.
5114 root_config = self._frozen_config.roots[self._frozen_config.target_root]
5115 sets = root_config.sets
5116 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Entries must be strings; "system"/"world" are handled specially
# (note: `basestring` implies this file still targets Python 2 here).
5119 if not isinstance(x, basestring):
5121 if x in ("system", "world"):
# "@set" entries become SetArg instances backed by the named pset.
5123 if x.startswith(SETPREFIX):
5124 s = x[len(SETPREFIX):]
5127 if s in depgraph_sets.sets:
5130 depgraph_sets.sets[s] = pset
5131 args.append(SetArg(arg=x, pset=pset,
5132 root_config=root_config))
# Anything else is treated as an atom; invalid atoms are skipped
# (gap: the Atom construction guarded by this except is not shown).
5136 except portage.exception.InvalidAtom:
5138 args.append(AtomArg(arg=x, atom=x,
5139 root_config=root_config))
5141 self._set_args(args)
# Nested exception class (attribute of depgraph); carries a reference to the
# depgraph that raised it so callers can inspect/prune the graph.
5144 class UnsatisfiedResumeDep(portage.exception.PortageException):
5146 A dependency of a resume list is not installed. This
5147 can occur when a required package is dropped from the
5148 merge list via --skipfirst.
5150 def __init__(self, depgraph, value):
5151 portage.exception.PortageException.__init__(self, value)
# Keep the originating depgraph so handlers (e.g. _resume_depgraph)
# can walk its digraph to prune unsatisfied parents.
5152 self.depgraph = depgraph
# Base class for depgraph-internal control-flow exceptions (below).
5154 class _internal_exception(portage.exception.PortageException):
5155 def __init__(self, value=""):
5156 portage.exception.PortageException.__init__(self, value)
# Internal sentinel exception: terminates graph creation after the real
# reason has already been written to stderr.
5158 class _unknown_internal_error(_internal_exception):
5160 Used by the depgraph internally to terminate graph creation.
5161 The specific reason for the failure should have been dumped
5162 to stderr, unfortunately, the exact reason for the failure
# Internal control-flow exception: signals that _serialize_tasks() should
# be re-run (e.g. after neglected dependencies are added to the graph).
5166 class _serialize_tasks_retry(_internal_exception):
5168 This is raised by the _serialize_tasks() method when it needs to
5169 be called again for some reason. The only case that it's currently
5170 used for is when neglected dependencies need to be added to the
5171 graph in order to avoid making a potentially unsafe decision.
# Internal control-flow exception used during backtracking.
# NOTE(review): the tail of this docstring (lines 5178-5180) is missing
# from this dump.
5174 class _backtrack_mask(_internal_exception):
5176 This is raised by _show_unsatisfied_dep() when it's called with
5177 check_backtrack=True and a matching package has been masked by
# True when the resolver must be restarted with new backtrack parameters.
5181 def need_restart(self):
5182 return self._dynamic_config._need_restart
# True when a solution exists that does not require autounmask changes.
5184 def success_without_autounmask(self):
5185 return self._dynamic_config._success_without_autounmask
# Parameters collected for the next backtracking run (fed to Backtracker).
5187 def get_backtrack_infos(self):
5188 return self._dynamic_config._backtrack_infos
# dbapi facade over the depgraph's package selection, used by dep_check().
# NOTE(review): line-sampled dump — several guard/return/try lines are
# absent (gaps in the embedded numbering); confirm control flow upstream.
5191 class _dep_check_composite_db(dbapi):
5193 A dbapi-like interface that is optimized for use in dep_check() calls.
5194 This is built on top of the existing depgraph package selection logic.
5195 Some packages that have been added to the graph may be masked from this
5196 view in order to influence the atom preference selection that occurs
5199 def __init__(self, depgraph, root):
5200 dbapi.__init__(self)
5201 self._depgraph = depgraph
# Per-atom match results and cpv -> Package mapping, cleared together.
5203 self._match_cache = {}
5204 self._cpv_pkg_map = {}
5206 def _clear_cache(self):
5207 self._match_cache.clear()
5208 self._cpv_pkg_map.clear()
# Return matching cpvs for an atom, consulting the depgraph's selection
# logic and caching the result per original atom.
5210 def match(self, atom):
5211 ret = self._match_cache.get(atom)
5216 atom = self._dep_expand(atom)
5217 pkg, existing = self._depgraph._select_package(self._root, atom)
5221 # Return the highest available from select_package() as well as
5222 # any matching slots in the graph db.
5224 slots.add(pkg.metadata["SLOT"])
5225 if pkg.cp.startswith("virtual/"):
5226 # For new-style virtual lookahead that occurs inside
5227 # dep_check(), examine all slots. This is needed
5228 # so that newer slots will not unnecessarily be pulled in
5229 # when a satisfying lower slot is already installed. For
5230 # example, if virtual/jdk-1.4 is satisfied via kaffe then
5231 # there's no need to pull in a newer slot to satisfy a
5232 # virtual/jdk dependency.
5233 for db, pkg_type, built, installed, db_keys in \
5234 self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
5235 for cpv in db.match(atom):
5236 if portage.cpv_getkey(cpv) != pkg.cp:
5238 slots.add(db.aux_get(cpv, ["SLOT"])[0])
# Record the visible selection, then resolve one package per
# remaining slot (gap: loop header around 5244 is not shown).
5240 if self._visible(pkg):
5241 self._cpv_pkg_map[pkg.cpv] = pkg
5243 slots.remove(pkg.metadata["SLOT"])
5245 slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
5246 pkg, existing = self._depgraph._select_package(
5247 self._root, slot_atom)
5250 if not self._visible(pkg):
5252 self._cpv_pkg_map[pkg.cpv] = pkg
5255 self._cpv_sort_ascending(ret)
5256 self._match_cache[orig_atom] = ret
# Decide whether a package may be offered to dep_check(); hides
# packages that would be slot-conflicting or non-highest-in-slot.
5259 def _visible(self, pkg):
5260 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
5262 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
5263 except (StopIteration, portage.exception.InvalidDependString):
5267 if pkg.installed and not self._depgraph._pkg_visibility_check(pkg):
5269 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
5270 self._root].get(pkg.slot_atom)
5271 if in_graph is None:
5272 # Mask choices for packages which are not the highest visible
5273 # version within their slot (since they usually trigger slot
5275 highest_visible, in_graph = self._depgraph._select_package(
5276 self._root, pkg.slot_atom)
5277 # Note: highest_visible is not necessarily the real highest
5278 # visible, especially when --update is not enabled, so use
5279 # < operator instead of !=.
5280 if pkg < highest_visible:
5282 elif in_graph != pkg:
5283 # Mask choices for packages that would trigger a slot
5284 # conflict with a previously selected package.
# Qualify a category-less atom the way cpv_expand() would, preferring
# non-virtual matches to avoid AmbiguousPackageName where possible.
5288 def _dep_expand(self, atom):
5290 This is only needed for old installed packages that may
5291 contain atoms that are not fully qualified with a specific
5292 category. Emulate the cpv_expand() function that's used by
5293 dbapi.match() in cases like this. If there are multiple
5294 matches, it's often due to a new-style virtual that has
5295 been added, so try to filter those out to avoid raising
5298 root_config = self._depgraph.roots[self._root]
5300 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5301 if len(expanded_atoms) > 1:
5302 non_virtual_atoms = []
5303 for x in expanded_atoms:
5304 if not x.cp.startswith("virtual/"):
5305 non_virtual_atoms.append(x)
5306 if len(non_virtual_atoms) == 1:
5307 expanded_atoms = non_virtual_atoms
5308 if len(expanded_atoms) > 1:
5309 # compatible with portage.cpv_expand()
5310 raise portage.exception.AmbiguousPackageName(
5311 [x.cp for x in expanded_atoms])
5313 atom = expanded_atoms[0]
# No expansion found: fall back to a "null/" category probe and,
# if PROVIDE-style virtuals match, rewrite to "virtual/".
5315 null_atom = Atom(insert_category_into_atom(atom, "null"))
5316 cat, atom_pn = portage.catsplit(null_atom.cp)
5317 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5319 # Allow the resolver to choose which virtual.
5320 atom = Atom(null_atom.replace('null/', 'virtual/', 1))
# Metadata lookup served from the Package objects cached by match().
5325 def aux_get(self, cpv, wants):
5326 metadata = self._cpv_pkg_map[cpv].metadata
5327 return [metadata.get(x, "") for x in wants]
5329 def match_pkgs(self, atom):
5330 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
# Report an ambiguous short ebuild name to the user; in non-quiet mode also
# runs a search to show details for each candidate category/package.
# NOTE(review): line-sampled dump — the quiet-mode return and the search
# output call inside the final loop are not visible here.
5332 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
5334 if "--quiet" in myopts:
5335 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
5336 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
5337 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5338 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
# Non-quiet path: reuse the emerge search machinery to describe each match.
5341 s = search(root_config, spinner, "--searchdesc" in myopts,
5342 "--quiet" not in myopts, "--usepkg" in myopts,
5343 "--usepkgonly" in myopts)
5344 null_cp = portage.dep_getkey(insert_category_into_atom(
5346 cat, atom_pn = portage.catsplit(null_cp)
5347 s.searchkey = atom_pn
5348 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
5351 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
5352 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
# Insert "category/" before the first word character of an atom string,
# preserving any leading operator characters (e.g. ">=" in ">=foo-1.0").
# NOTE(review): the no-match branch (returning None, per upstream) falls in
# a gap of this dump.
5354 def insert_category_into_atom(atom, category):
5355 alphanum = re.search(r'\w', atom)
5357 ret = atom[:alphanum.start()] + "%s/" % category + \
5358 atom[alphanum.start():]
# Start the progress spinner and, for pretend/ask/tree/verbose modes, print
# the appropriate "These are the packages that would be ..." banner.
# NOTE(review): line-sampled dump — the early return for a None spinner and
# the action-word assignments ("fetched"/"built"/"merged") are not visible.
5363 def _spinner_start(spinner, myopts):
5366 if "--quiet" not in myopts and \
5367 ("--pretend" in myopts or "--ask" in myopts or \
5368 "--tree" in myopts or "--verbose" in myopts):
# Choose the verb for the banner based on the requested operation.
5370 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
5372 elif "--buildpkgonly" in myopts:
5376 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
5377 if "--unordered-display" in myopts:
5378 portage.writemsg_stdout("\n" + \
5379 darkgreen("These are the packages that " + \
5380 "would be %s:" % action) + "\n\n")
5382 portage.writemsg_stdout("\n" + \
5383 darkgreen("These are the packages that " + \
5384 "would be %s, in reverse order:" % action) + "\n\n")
5386 portage.writemsg_stdout("\n" + \
5387 darkgreen("These are the packages that " + \
5388 "would be %s, in order:" % action) + "\n\n")
# Quiet/--nodeps runs silence the spinner entirely.
5390 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
5391 if not show_spinner:
5392 spinner.update = spinner.update_quiet
5395 portage.writemsg_stdout("Calculating dependencies ")
# Stop the spinner and print the closing "... done!" marker, unless the
# spinner was absent or silenced by _spinner_start().
5397 def _spinner_stop(spinner):
5398 if spinner is None or \
5399 spinner.update is spinner.update_quiet:
5402 portage.writemsg_stdout("\b\b... done!\n")
# Public wrapper around _backtrack_depgraph() that manages the spinner
# (gap: the try/finally structure around 5410-5413 is not visible, but the
# _spinner_stop() call at 5414 indicates cleanup on all paths).
5404 def backtrack_depgraph(settings, trees, myopts, myparams,
5405 myaction, myfiles, spinner):
5407 Raises PackageSetNotFound if myfiles contains a missing package set.
5409 _spinner_start(spinner, myopts)
5411 return _backtrack_depgraph(settings, trees, myopts, myparams,
5412 myaction, myfiles, spinner)
5414 _spinner_stop(spinner)
# Core backtracking loop: repeatedly build a depgraph, feeding failure info
# back into the Backtracker until success or the backtrack budget runs out;
# finally retries once with the best recorded parameters.
# Returns (success, depgraph, favorites).
# NOTE(review): line-sampled dump — the `while backtracker:` loop header and
# break statements fall in gaps of the numbering.
5417 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
# --backtrack defaults to 5 attempts; 0 disables backtracking entirely.
5419 max_depth = myopts.get('--backtrack', 5)
5420 allow_backtracking = max_depth > 0
5421 backtracker = Backtracker(max_depth)
5423 frozen_config = _frozen_depgraph_config(settings, trees,
# Each iteration pulls the next parameter set from the backtracker.
5427 backtrack_parameters = backtracker.get()
5429 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5430 frozen_config=frozen_config,
5431 allow_backtracking=allow_backtracking,
5432 backtrack_parameters=backtrack_parameters)
5433 success, favorites = mydepgraph.select_files(myfiles)
5435 if success or mydepgraph.success_without_autounmask():
5437 elif mydepgraph.need_restart():
# Feed masking/conflict info back so the next attempt can avoid it.
5438 backtracker.feedback(mydepgraph.get_backtrack_infos())
# Budget exhausted without success: fall back to the best run seen,
# with backtracking disabled so the result is deterministic.
5440 if not (success or mydepgraph.success_without_autounmask()) and backtracker.backtracked():
5441 backtrack_parameters = backtracker.get_best_run()
5443 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
5444 frozen_config=frozen_config,
5445 allow_backtracking=False,
5446 backtrack_parameters=backtrack_parameters)
5447 success, favorites = mydepgraph.select_files(myfiles)
5449 return (success, mydepgraph, favorites)
# Public wrapper around _resume_depgraph() that manages the spinner
# (cleanup via _spinner_stop(); the try/finally lines fall in a dump gap).
5452 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5454 Raises PackageSetNotFound if myfiles contains a missing package set.
5456 _spinner_start(spinner, myopts)
5458 return _resume_depgraph(settings, trees, mtimedb, myopts,
5461 _spinner_stop(spinner)
# Build a depgraph from mtimedb["resume"], pruning unsatisfied parents from
# the mergelist and retrying until the graph loads or cannot shrink further.
# NOTE(review): line-sampled dump — the outer retry loop header, the
# skip_masked initialization, and several guard lines are not visible here.
5463 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
5465 Construct a depgraph for the given resume list. This will raise
5466 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
5467 TODO: Return reasons for dropped_tasks, for display/logging.
5469 @returns: (success, depgraph, dropped_tasks)
5472 skip_unsatisfied = True
5473 mergelist = mtimedb["resume"]["mergelist"]
5474 dropped_tasks = set()
5475 frozen_config = _frozen_depgraph_config(settings, trees,
5478 mydepgraph = depgraph(settings, trees,
5479 myopts, myparams, spinner, frozen_config=frozen_config)
5481 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
5482 skip_masked=skip_masked)
5483 except depgraph.UnsatisfiedResumeDep as e:
5484 if not skip_unsatisfied:
# Collect every package whose removal would leave another scheduled
# package unsatisfied, transitively, by walking parents in the graph.
5487 graph = mydepgraph._dynamic_config.digraph
5488 unsatisfied_parents = dict((dep.parent, dep.parent) \
5490 traversed_nodes = set()
5491 unsatisfied_stack = list(unsatisfied_parents)
5492 while unsatisfied_stack:
5493 pkg = unsatisfied_stack.pop()
5494 if pkg in traversed_nodes:
5496 traversed_nodes.add(pkg)
5498 # If this package was pulled in by a parent
5499 # package scheduled for merge, removing this
5500 # package may cause the the parent package's
5501 # dependency to become unsatisfied.
5502 for parent_node in graph.parent_nodes(pkg):
5503 if not isinstance(parent_node, Package) \
5504 or parent_node.operation not in ("merge", "nomerge"):
# Soft (satisfied) dep edges are ignored when testing whether the
# parent would become unsatisfied.
5507 graph.child_nodes(parent_node,
5508 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5509 if pkg in unsatisfied:
5510 unsatisfied_parents[parent_node] = parent_node
5511 unsatisfied_stack.append(parent_node)
# Drop all unsatisfied parents from the mergelist and retry.
5513 pruned_mergelist = []
5515 if isinstance(x, list) and \
5516 tuple(x) not in unsatisfied_parents:
5517 pruned_mergelist.append(x)
5519 # If the mergelist doesn't shrink then this loop is infinite.
5520 if len(pruned_mergelist) == len(mergelist):
5521 # This happens if a package can't be dropped because
5522 # it's already installed, but it has unsatisfied PDEPEND.
5524 mergelist[:] = pruned_mergelist
5526 # Exclude installed packages that have been removed from the graph due
5527 # to failure to build/install runtime dependencies after the dependent
5528 # package has already been installed.
5529 dropped_tasks.update(pkg for pkg in \
5530 unsatisfied_parents if pkg.operation != "nomerge")
# Break references so the pruned depgraph state can be garbage collected.
5531 mydepgraph.break_refs(unsatisfied_parents)
5533 del e, graph, traversed_nodes, \
5534 unsatisfied_parents, unsatisfied_stack
5538 return (success, mydepgraph, dropped_tasks)
# Fetch metadata for a cpv from the given db and compute its masking
# reasons. Returns (metadata, mreasons); metadata is None on aux_get
# failure (the try/except around 5544-5547 falls in a dump gap).
5540 def get_mask_info(root_config, cpv, pkgsettings,
5541 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
5544 metadata = dict(zip(db_keys,
5545 db.aux_get(cpv, db_keys, myrepo=myrepo)))
5549 if metadata is None:
5550 mreasons = ["corruption"]
5552 eapi = metadata['EAPI']
# Unsupported EAPIs cannot be fully parsed; report them as the mask reason.
5555 if not portage.eapi_is_supported(eapi):
5556 mreasons = ['EAPI %s' % eapi]
5558 pkg = Package(type_name=pkg_type, root_config=root_config,
5559 cpv=cpv, built=built, installed=installed, metadata=metadata)
# When available, compute masking against the autounmask-adjusted USE.
5562 if _pkg_use_enabled is not None:
5563 modified_use = _pkg_use_enabled(pkg)
5565 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
5567 return metadata, mreasons
# Print a deduplicated report of masked packages with their mask reasons,
# package.mask comments, and license file locations. Returns True if any
# package was masked due to an unsupported EAPI.
# NOTE(review): line-sampled dump — shown_cpvs initialization and a few
# guard lines fall in gaps of the numbering.
5569 def show_masked_packages(masked_packages):
5570 shown_licenses = set()
5571 shown_comments = set()
5572 # Maybe there is both an ebuild and a binary. Only
5573 # show one of them to avoid redundant appearance.
5575 have_eapi_mask = False
5576 for (root_config, pkgsettings, cpv, repo,
5577 metadata, mreasons) in masked_packages:
5578 output_cpv = cpv + _repo_separator + repo
5579 if output_cpv in shown_cpvs:
5581 shown_cpvs.add(output_cpv)
5582 comment, filename = None, None
# For package.mask entries, recover the maintainer comment and its file.
5583 if "package.mask" in mreasons:
5584 comment, filename = \
5585 portage.getmaskingreason(
5586 cpv, metadata=metadata,
5587 settings=pkgsettings,
5588 portdb=root_config.trees["porttree"].dbapi,
5589 return_location=True)
5590 missing_licenses = []
# Only query licenses for supported EAPIs; metadata of unsupported
# EAPIs cannot be parsed reliably.
5592 if not portage.eapi_is_supported(metadata["EAPI"]):
5593 have_eapi_mask = True
5595 missing_licenses = \
5596 pkgsettings._getMissingLicenses(
5598 except portage.exception.InvalidDependString:
5599 # This will have already been reported
5600 # above via mreasons.
5603 writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
# Each distinct package.mask comment is printed at most once.
5605 if comment and comment not in shown_comments:
5606 writemsg_stdout(filename + ":\n" + comment + "\n",
5608 shown_comments.add(comment)
5609 portdb = root_config.trees["porttree"].dbapi
# Point the user at the on-disk copy of each missing license, once.
5610 for l in missing_licenses:
5611 l_path = portdb.findLicensePath(l)
5612 if l in shown_licenses:
5614 msg = ("A copy of the '%s' license" + \
5615 " is located at '%s'.\n\n") % (l, l_path)
5616 writemsg_stdout(msg, noiselevel=-1)
5617 shown_licenses.add(l)
5618 return have_eapi_mask
# Print a pointer to the MASKED PACKAGES documentation.
5620 def show_mask_docs():
5621 writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
5622 writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
# Print a pointer to the Gentoo Handbook section on blocked packages.
5624 def show_blocker_docs_link():
5625 writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
5626 writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
5627 writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
# Convenience wrapper: return only the human-readable messages from
# _get_masking_status()'s _MaskReason objects.
5629 def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
5630 return [mreason.message for \
5631 mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
5633 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
5634 mreasons = _getmaskingstatus(
5635 pkg, settings=pkgsettings,
5636 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
5638 if not pkg.installed:
5639 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
5640 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
5641 pkg.metadata["CHOST"]))
5643 if pkg.metadata["REQUIRED_USE"] and \
5644 eapi_has_required_use(pkg.metadata["EAPI"]):
5645 required_use = pkg.metadata["REQUIRED_USE"]
5647 use = pkg.use.enabled
5649 required_use_is_sat = check_required_use(
5650 required_use, use, pkg.iuse.is_valid_flag)
5651 except portage.exception.InvalidDependString:
5652 mreasons.append(_MaskReason("invalid", "invalid: REQUIRED_USE"))
5654 if not required_use_is_sat:
5655 msg = "violated use flag constraints: '%s'" % required_use
5656 mreasons.append(_MaskReason("REQUIRED_USE", "REQUIRED_USE violated"))
5659 for msg_type, msgs in pkg.invalid.items():
5662 _MaskReason("invalid", "invalid: %s" % (msg,)))
5664 if not pkg.metadata["SLOT"]:
5666 _MaskReason("invalid", "SLOT: undefined"))