1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
13 from collections import deque
14 from itertools import chain
17 from portage import os, OrderedDict
18 from portage import _unicode_decode, _unicode_encode, _encodings
19 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
20 from portage.dbapi import dbapi
21 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
22 check_required_use, human_readable_required_use, _repo_separator
23 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
24 from portage.exception import InvalidAtom, InvalidDependString, PortageException
25 from portage.output import colorize, create_color_func, \
27 bad = create_color_func("BAD")
28 from portage.package.ebuild.getmaskingstatus import \
29 _getmaskingstatus, _MaskReason
30 from portage._sets import SETPREFIX
31 from portage._sets.base import InternalPackageSet
32 from portage.util import ConfigProtect, shlex_split, new_protect_filename
33 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
34 from portage.util import ensure_dirs
35 from portage.util import writemsg_level, write_atomic
36 from portage.util.digraph import digraph
37 from portage.util.listdir import _ignorecvs_dirs
38 from portage.versions import catpkgsplit
40 from _emerge.AtomArg import AtomArg
41 from _emerge.Blocker import Blocker
42 from _emerge.BlockerCache import BlockerCache
43 from _emerge.BlockerDepPriority import BlockerDepPriority
44 from _emerge.countdown import countdown
45 from _emerge.create_world_atom import create_world_atom
46 from _emerge.Dependency import Dependency
47 from _emerge.DependencyArg import DependencyArg
48 from _emerge.DepPriority import DepPriority
49 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
50 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
51 from _emerge.FakeVartree import FakeVartree
52 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
53 from _emerge.is_valid_package_atom import insert_category_into_atom, \
55 from _emerge.Package import Package
56 from _emerge.PackageArg import PackageArg
57 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
58 from _emerge.RootConfig import RootConfig
59 from _emerge.search import search
60 from _emerge.SetArg import SetArg
61 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
62 from _emerge.UnmergeDepPriority import UnmergeDepPriority
63 from _emerge.UseFlagDisplay import pkg_use_display
64 from _emerge.userquery import userquery
66 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
67 from _emerge.resolver.slot_collision import slot_conflict_handler
68 from _emerge.resolver.circular_dependency import circular_dependency_handler
69 from _emerge.resolver.output import Display
71 if sys.hexversion >= 0x3000000:
# Immutable bundle of resolver results handed off to the Scheduler:
# the trees dict, the shared Package cache, the dependency graph, and
# the ordered merge list.
# NOTE(review): this listing is a lossy numbered dump — original lines
# 77 and 79 (presumably the trees/graph assignments) are missing here.
75 class _scheduler_graph_config(object):
76 def __init__(self, trees, pkg_cache, graph, mergelist):
78 self.pkg_cache = pkg_cache
80 self.mergelist = mergelist
# Build an InternalPackageSet (with wildcard support) from a list of
# atom strings. An atom that fails to parse as-is is retried with a
# leading "*/" so that bare package names (no category) are accepted.
# NOTE(review): lossy dump — the surrounding loop/try lines (84-85) and
# the pkgs.add()/return lines (89-91) are missing from this listing.
82 def _wildcard_set(atoms):
83 pkgs = InternalPackageSet(allow_wildcard=True)
86 x = Atom(x, allow_wildcard=True)
# Fall back: prefix "*/" so a category-less name becomes a valid
# wildcard atom.
87 except portage.exception.InvalidAtom:
88 x = Atom("*/" + x, allow_wildcard=True)
# Per-resolver-run configuration that does NOT change across backtracking
# attempts: settings, trees (with the real vartree replaced by a
# FakeVartree), per-root pkgsettings clones, and the atom sets derived
# from command-line options (--exclude, --rebuild-*, etc.).
# NOTE(review): lossy numbered dump — several original lines are missing
# (e.g. 97-98, 100, 106, 108, 110, 112, 118, 128, 130), including the
# roots/trees dict initializations and the per-root loop header.
92 class _frozen_depgraph_config(object):
94 def __init__(self, settings, trees, myopts, spinner):
95 self.settings = settings
96 self.target_root = settings["ROOT"]
99 if settings.get("PORTAGE_DEBUG", "") == "1":
101 self.spinner = spinner
102 self._running_root = trees["/"]["root_config"]
# Options under which a restart of the resolver is never required.
103 self._opts_no_restart = frozenset(["--buildpkgonly",
104 "--fetchonly", "--fetch-all-uri", "--pretend"])
105 self.pkgsettings = {}
# Keep a reference to the caller's original trees; self.trees (built
# below) substitutes FakeVartree instances for the real vartrees.
107 self._trees_orig = trees
109 # All Package instances
111 self._highest_license_masked = {}
113 self.trees[myroot] = {}
114 # Create a RootConfig instance that references
115 # the FakeVartree instead of the real one.
116 self.roots[myroot] = RootConfig(
117 trees[myroot]["vartree"].settings,
119 trees[myroot]["root_config"].setconfig)
120 for tree in ("porttree", "bintree"):
121 self.trees[myroot][tree] = trees[myroot][tree]
# The FakeVartree models the vdb state and shares the Package cache.
122 self.trees[myroot]["vartree"] = \
123 FakeVartree(trees[myroot]["root_config"],
124 pkg_cache=self._pkg_cache,
125 pkg_root_config=self.roots[myroot])
126 self.pkgsettings[myroot] = portage.config(
127 clone=self.trees[myroot]["vartree"].settings)
129 self._required_set_names = set(["world"])
# Each of the following options takes a list of (possibly wildcard)
# atoms; join-then-split normalizes whitespace before parsing.
131 atoms = ' '.join(myopts.get("--exclude", [])).split()
132 self.excluded_pkgs = _wildcard_set(atoms)
133 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
134 self.reinstall_atoms = _wildcard_set(atoms)
135 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
136 self.usepkg_exclude = _wildcard_set(atoms)
137 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
138 self.useoldpkg_atoms = _wildcard_set(atoms)
139 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
140 self.rebuild_exclude = _wildcard_set(atoms)
141 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
142 self.rebuild_ignore = _wildcard_set(atoms)
# Boolean rebuild-policy flags consumed by _rebuild_config below.
144 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
145 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
146 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
# Per-root container for package sets pulled into the graph, plus the
# aggregate atom set and the atom -> argument mapping.
# NOTE(review): lossy dump — the __init__ def line (original ~149) and
# the self.sets initialization (~151) are missing from this listing.
148 class _depgraph_sets(object):
150 # contains all sets added to the graph
152 # contains non-set atoms given as arguments
153 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
154 # contains all atoms from all sets added to the graph, including
155 # atoms given as arguments
156 self.atoms = InternalPackageSet(allow_repo=True)
157 self.atom_arg_map = {}
# Tracks build-time dependency edges and decides which packages must be
# rebuilt or reinstalled under the --rebuild-if-* options. State is
# carried across backtracking runs via BacktrackParameter.
# NOTE(review): lossy numbered dump — return/continue statements and
# several loop headers are missing throughout this class (the embedded
# original numbering skips lines); code left verbatim, comments only.
159 class _rebuild_config(object):
160 def __init__(self, frozen_config, backtrack_parameters):
161 self._graph = digraph()
162 self._frozen_config = frozen_config
# Copy the backtracker's sets so local mutation doesn't leak back.
163 self.rebuild_list = backtrack_parameters.rebuild_list.copy()
164 self.orig_rebuild_list = self.rebuild_list.copy()
165 self.reinstall_list = backtrack_parameters.reinstall_list.copy()
166 self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
167 self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
168 self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
# True when any rebuild policy is active; gates all work in add().
169 self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
170 self.rebuild_if_unbuilt)

# Record a build-time dependency edge (dep_pkg -> parent) in the
# rebuild graph, honoring --rebuild-exclude / --rebuild-ignore.
172 def add(self, dep_pkg, dep):
173 parent = dep.collapsed_parent
174 priority = dep.collapsed_priority
175 rebuild_exclude = self._frozen_config.rebuild_exclude
176 rebuild_ignore = self._frozen_config.rebuild_ignore
# Only built parents with a buildtime dependency on a real Package
# are interesting for rebuild tracking.
177 if (self.rebuild and isinstance(parent, Package) and
178 parent.built and priority.buildtime and
179 isinstance(dep_pkg, Package) and
180 not rebuild_exclude.findAtomForPackage(parent) and
181 not rebuild_ignore.findAtomForPackage(dep_pkg)):
182 self._graph.add(dep_pkg, parent, priority)

184 def _needs_rebuild(self, dep_pkg):
185 """Check whether packages that depend on dep_pkg need to be rebuilt."""
186 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
# NOTE(review): the early-return bodies (original lines 188-189,
# 193-194, 209-211) are missing from this dump.
187 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
190 if self.rebuild_if_unbuilt:
191 # dep_pkg is being installed from source, so binary
192 # packages for parents are invalid. Force rebuild
195 trees = self._frozen_config.trees
196 vardb = trees[dep_pkg.root]["vartree"].dbapi
197 if self.rebuild_if_new_rev:
198 # Parent packages are valid if a package with the same
199 # cpv is already installed.
200 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
202 # Otherwise, parent packages are valid if a package with the same
203 # version (excluding revision) is already installed.
204 assert self.rebuild_if_new_ver
# catpkgsplit -> (cat, pkg, ver, rev); [:-1] drops the revision.
205 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
206 for inst_cpv in vardb.match(dep_pkg.slot_atom):
207 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
208 if inst_cpv_norev == cpv_norev:

# Decide whether `parent` must be rebuilt (or reinstalled from a
# binary) given its build-time deps; returns truthiness consumed by
# trigger_rebuilds() below (return statements omitted in this dump).
213 def _trigger_rebuild(self, parent, build_deps):
214 root_slot = (parent.root, parent.slot_atom)
215 if root_slot in self.rebuild_list:
217 trees = self._frozen_config.trees
219 for slot_atom, dep_pkg in build_deps.items():
220 dep_root_slot = (dep_pkg.root, slot_atom)
221 if self._needs_rebuild(dep_pkg):
222 self.rebuild_list.add(root_slot)
224 elif ("--usepkg" in self._frozen_config.myopts and
225 (dep_root_slot in self.reinstall_list or
226 dep_root_slot in self.rebuild_list or
227 not dep_pkg.installed)):
229 # A direct rebuild dependency is being installed. We
230 # should update the parent as well to the latest binary,
231 # if that binary is valid.
233 # To validate the binary, we check whether all of the
234 # rebuild dependencies are present on the same binhost.
236 # 1) If parent is present on the binhost, but one of its
237 # rebuild dependencies is not, then the parent should
238 # be rebuilt from source.
239 # 2) Otherwise, the parent binary is assumed to be valid,
240 # because all of its rebuild dependencies are
242 bintree = trees[parent.root]["bintree"]
243 uri = bintree.get_pkgindex_uri(parent.cpv)
244 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
245 bindb = bintree.dbapi
# With --rebuild-if-new-ver, any same-version (ignoring revision)
# binary on the same binhost also validates the parent.
246 if self.rebuild_if_new_ver and uri and uri != dep_uri:
247 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
248 for cpv in bindb.match(dep_pkg.slot_atom):
249 if cpv_norev == catpkgsplit(cpv)[:-1]:
250 dep_uri = bintree.get_pkgindex_uri(cpv)
253 if uri and uri != dep_uri:
254 # 1) Remote binary package is invalid because it was
255 # built without dep_pkg. Force rebuild.
256 self.rebuild_list.add(root_slot)
258 elif (parent.installed and
259 root_slot not in self.reinstall_list):
260 inst_build_time = parent.metadata.get("BUILD_TIME")
# Compare the binhost package's BUILD_TIME against the
# installed copy; a mismatch means the local install is stale.
262 bin_build_time, = bindb.aux_get(parent.cpv,
266 if bin_build_time != inst_build_time:
267 # 2) Remote binary package is valid, and local package
268 # is not up to date. Force reinstall.
271 self.reinstall_list.add(root_slot)

274 def trigger_rebuilds(self):
276 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
277 depends on pkgA at both build-time and run-time, pkgB needs to be
# NOTE(review): docstring closer and the graph/build_deps setup
# (original lines ~278-283) are missing from this dump.
284 leaf_nodes = deque(graph.leaf_nodes())

286 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
287 # will always know which children are being rebuilt.
290 # We'll have to drop an edge. This should be quite rare.
291 leaf_nodes.append(graph.order[-1])

293 node = leaf_nodes.popleft()
294 if node not in graph:
295 # This can be triggered by circular dependencies.
297 slot_atom = node.slot_atom

299 # Remove our leaf node from the graph, keeping track of deps.
300 parents = graph.parent_nodes(node)
302 node_build_deps = build_deps.get(node, {})
303 for parent in parents:
305 # Ignore a direct cycle.
# Propagate this node upward as a build-dep of each parent; a
# parent that just became childless is processed next.
307 parent_bdeps = build_deps.setdefault(parent, {})
308 parent_bdeps[slot_atom] = node
309 if not graph.child_nodes(parent):
310 leaf_nodes.append(parent)

312 # Trigger rebuilds for our leaf node. Because all of our children
313 # have been processed, the build_deps will be completely filled in,
314 # and self.rebuild_list / self.reinstall_list will tell us whether
315 # any of our children need to be rebuilt or reinstalled.
316 if self._trigger_rebuild(node, node_build_deps):
# Resolver state that changes on every backtracking attempt: the
# dependency digraph, blocker bookkeeping digraphs, slot-conflict data,
# per-root fake/filtered db views, and the backtracker-supplied
# autounmask change sets.
# NOTE(review): lossy numbered dump — various statements are missing
# (the embedded numbering skips lines, e.g. 332, 346, 385, 391, 402,
# 414, 416-417, 428-429); code left verbatim, comments only.
323 class _dynamic_depgraph_config(object):
324 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
325 self.myparams = myparams.copy()
326 self._vdb_loaded = False
327 self._allow_backtracking = allow_backtracking
328 # Maps slot atom to package for each Package added to the graph.
329 self._slot_pkg_map = {}
330 # Maps nodes to the reasons they were selected for reinstallation.
331 self._reinstall_nodes = {}
333 # Contains a filtered view of preferred packages that are selected
334 # from available repositories.
335 self._filtered_trees = {}
336 # Contains installed packages and new packages that have been added
338 self._graph_trees = {}
339 # Caches visible packages returned from _select_package, for use in
340 # depgraph._iter_atoms_for_pkg() SLOT logic.
341 self._visible_pkgs = {}
342 #contains the args created by select_files
343 self._initial_arg_list = []
344 self.digraph = portage.digraph()
345 # manages sets added to the graph
347 # contains all nodes pulled in by self.sets
348 self._set_nodes = set()
349 # Contains only Blocker -> Uninstall edges
350 self._blocker_uninstalls = digraph()
351 # Contains only Package -> Blocker edges
352 self._blocker_parents = digraph()
353 # Contains only irrelevant Package -> Blocker edges
354 self._irrelevant_blockers = digraph()
355 # Contains only unsolvable Package -> Blocker edges
356 self._unsolvable_blockers = digraph()
357 # Contains all Blocker -> Blocked Package edges
358 self._blocked_pkgs = digraph()
359 # Contains world packages that have been protected from
360 # uninstallation but may not have been added to the graph
361 # if the graph is not complete yet.
362 self._blocked_world_pkgs = {}
363 # Contains packages whose dependencies have been traversed.
364 # This use used to check if we have accounted for blockers
365 # relevant to a package.
366 self._traversed_pkg_deps = set()
367 self._slot_collision_info = {}
368 # Slot collision nodes are not allowed to block other packages since
369 # blocker validation is only able to account for one package per slot.
370 self._slot_collision_nodes = set()
# Maps each Package to the set of (parent, atom) pairs that pulled
# it into the graph.
371 self._parent_atoms = {}
372 self._slot_conflict_parent_atoms = set()
373 self._slot_conflict_handler = None
374 self._circular_dependency_handler = None
375 self._serialized_tasks_cache = None
376 self._scheduler_graph = None
377 self._displayed_list = None
378 self._pprovided_args = []
379 self._missing_args = []
380 self._masked_installed = set()
381 self._masked_license_updates = set()
382 self._unsatisfied_deps_for_display = []
383 self._unsatisfied_blockers_for_display = None
384 self._circular_deps_for_display = None
386 self._dep_disjunctive_stack = []
387 self._unsatisfied_deps = []
388 self._initially_unsatisfied_deps = []
389 self._ignored_deps = []
390 self._highest_pkg_cache = {}
# Autounmask / backtracking state handed down by the Backtracker.
392 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
393 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
394 self._needed_license_changes = backtrack_parameters.needed_license_changes
395 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
396 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
397 self._need_restart = False
398 # For conditions that always require user intervention, such as
399 # unsatisfied REQUIRED_USE (currently has no autounmask support).
400 self._skip_restart = False
401 self._backtrack_infos = {}
# Autounmask is on unless explicitly disabled with --autounmask=n.
403 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
404 self._success_without_autounmask = False
405 self._traverse_ignored_deps = False

# Per-root setup: fake vdb, graph-tree and filtered-tree views used
# by dep_check() and package selection.
407 for myroot in depgraph._frozen_config.trees:
408 self.sets[myroot] = _depgraph_sets()
409 self._slot_pkg_map[myroot] = {}
410 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
411 # This dbapi instance will model the state that the vdb will
412 # have after new packages have been installed.
413 fakedb = PackageVirtualDbapi(vardb.settings)
415 self.mydbapi[myroot] = fakedb
418 graph_tree.dbapi = fakedb
419 self._graph_trees[myroot] = {}
420 self._filtered_trees[myroot] = {}
421 # Substitute the graph tree for the vartree in dep_check() since we
422 # want atom selections to be consistent with package selections
423 # have already been made.
424 self._graph_trees[myroot]["porttree"] = graph_tree
425 self._graph_trees[myroot]["vartree"] = graph_tree
426 self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
427 self._graph_trees[myroot]["graph"] = self.digraph
430 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
431 self._filtered_trees[myroot]["porttree"] = filtered_tree
432 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

434 # Passing in graph_tree as the vartree here could lead to better
435 # atom selections in some cases by causing atoms for packages that
436 # have been added to the graph to be preferred over other choices.
437 # However, it can trigger atom selections that result in
438 # unresolvable direct circular dependencies. For example, this
439 # happens with gwydion-dylan which depends on either itself or
440 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
441 # gwydion-dylan-bin needs to be selected in order to avoid a
442 # an unresolvable direct circular dependency.
444 # To solve the problem described above, pass in "graph_db" so that
445 # packages that have been added to the graph are distinguishable
446 # from other available packages and installed packages. Also, pass
447 # the parent package into self._select_atoms() calls so that
448 # unresolvable direct circular dependencies can be detected and
449 # avoided when possible.
450 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
451 self._filtered_trees[myroot]["graph"] = self.digraph
452 self._filtered_trees[myroot]["vartree"] = \
453 depgraph._frozen_config.trees[myroot]["vartree"]

456 # (db, pkg_type, built, installed, db_keys)
457 if "remove" in self.myparams:
458 # For removal operations, use _dep_check_composite_db
459 # for availability and visibility checks. This provides
460 # consistency with install operations, so we don't
461 # get install/uninstall cycles like in bug #332719.
462 self._graph_trees[myroot]["porttree"] = filtered_tree
# dbs is an ordered preference list of candidate databases:
# ebuild repo (unless --usepkgonly), then binary (with --usepkg),
# then the installed vdb.
464 if "--usepkgonly" not in depgraph._frozen_config.myopts:
465 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
466 db_keys = list(portdb._aux_cache_keys)
467 dbs.append((portdb, "ebuild", False, False, db_keys))
469 if "--usepkg" in depgraph._frozen_config.myopts:
470 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
471 db_keys = list(bindb._aux_cache_keys)
472 dbs.append((bindb, "binary", True, False, db_keys))
# Use the real (original) vartree's aux cache keys here, since the
# fake vartree's key set may differ.
474 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
475 db_keys = list(depgraph._frozen_config._trees_orig[myroot
476 ]["vartree"].dbapi._aux_cache_keys)
477 dbs.append((vardb, "installed", True, True, db_keys))
478 self._filtered_trees[myroot]["dbs"] = dbs
# The dependency resolver. Splits its state into a _frozen_depgraph_config
# (shared across backtracking runs) and a _dynamic_depgraph_config
# (rebuilt per run).
480 class depgraph(object):

482 pkg_tree_map = RootConfig.pkg_tree_map

# Dependency metadata keys considered, in evaluation order.
484 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

# NOTE(review): backtrack_parameters=BacktrackParameter() is a mutable
# default argument — a single shared instance across calls; confirm
# BacktrackParameter is treated as immutable by callers.
486 def __init__(self, settings, trees, myopts, myparams, spinner,
487 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
# Lazily build the frozen config on first use; backtracking runs
# pass the previous one in to avoid the expensive rebuild.
488 if frozen_config is None:
489 frozen_config = _frozen_depgraph_config(settings, trees,
491 self._frozen_config = frozen_config
492 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
493 allow_backtracking, backtrack_parameters)
494 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
# Strategy hooks: default to highest-available selection policies.
496 self._select_atoms = self._select_atoms_highest_available
497 self._select_package = self._select_pkg_highest_available
# NOTE(review): lossy dump — the `def _load_vdb(self):` line (original
# ~499-500) is missing; what follows is that method's docstring and body.
501 Load installed package metadata if appropriate. This used to be called
502 from the constructor, but that wasn't very nice since this procedure
503 is slow and it generates spinner output. So, now it's called on-demand
504 by various methods when necessary.
# Idempotent: bail out once the vdb has been loaded for this run.
507 if self._dynamic_config._vdb_loaded:

510 for myroot in self._frozen_config.trees:

# With --nodeps there is no need to preload installed packages.
512 preload_installed_pkgs = \
513 "--nodeps" not in self._frozen_config.myopts

515 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
516 if not fake_vartree.dbapi:
517 # This needs to be called for the first depgraph, but not for
518 # backtracking depgraphs that share the same frozen_config.
521 # FakeVartree.sync() populates virtuals, and we want
522 # self.pkgsettings to have them populated too.
523 self._frozen_config.pkgsettings[myroot] = \
524 portage.config(clone=fake_vartree.settings)

526 if preload_installed_pkgs:
527 vardb = fake_vartree.dbapi
528 fakedb = self._dynamic_config._graph_trees[
529 myroot]["vartree"].dbapi

# Copy every installed package into the fake graph db, updating
# the spinner as we go (the per-package loop header is missing
# from this dump).
532 self._spinner_update()
533 # This triggers metadata updates via FakeVartree.
534 vardb.aux_get(pkg.cpv, [])
535 fakedb.cpv_inject(pkg)

537 self._dynamic_config._vdb_loaded = True
# Advance the progress spinner, if one was supplied.
539 def _spinner_update(self):
540 if self._frozen_config.spinner:
541 self._frozen_config.spinner.update()
# Report updates that were skipped because the candidate package ended
# up in _runtime_pkg_mask, grouped by mask type and reduced to the
# highest missed update per (root, slot).
543 def _show_missed_update(self):

545 # In order to minimize noise, show only the highest
546 # missed update from each SLOT.
548 for pkg, mask_reasons in \
549 self._dynamic_config._runtime_pkg_mask.items():
551 # Exclude installed here since we only
552 # want to show available updates.
554 k = (pkg.root, pkg.slot_atom)
# Keep only the best candidate per slot (the comparison branch,
# original lines 557-558, is missing from this dump).
555 if k in missed_updates:
556 other_pkg, mask_type, parent_atoms = missed_updates[k]
559 for mask_type, parent_atoms in mask_reasons.items():
562 missed_updates[k] = (pkg, mask_type, parent_atoms)

565 if not missed_updates:

# Group by mask type so each category can be rendered separately.
568 missed_update_types = {}
569 for pkg, mask_type, parent_atoms in missed_updates.values():
570 missed_update_types.setdefault(mask_type,
571 []).append((pkg, parent_atoms))

# Under --quiet (without --debug), suppress the noisy categories.
573 if '--quiet' in self._frozen_config.myopts and \
574 '--debug' not in self._frozen_config.myopts:
575 missed_update_types.pop("slot conflict", None)
576 missed_update_types.pop("missing dependency", None)

578 self._show_missed_update_slot_conflicts(
579 missed_update_types.get("slot conflict"))

581 self._show_missed_update_unsatisfied_dep(
582 missed_update_types.get("missing dependency"))
# Render updates skipped due to unsatisfied dependencies. Updates whose
# display raises _backtrack_mask are collected and summarized in
# abbreviated form at the end (see bug #285832 note below).
584 def _show_missed_update_unsatisfied_dep(self, missed_updates):

586 if not missed_updates:

589 self._show_merge_list()
590 backtrack_masked = []

592 for pkg, parent_atoms in missed_updates:
# First pass (inside a try — the `try:` line is missing from this
# dump): probe whether display would hit a backtrack mask.
595 for parent, root, atom in parent_atoms:
596 self._show_unsatisfied_dep(root, atom, myparent=parent,
597 check_backtrack=True)
598 except self._backtrack_mask:
599 # This is displayed below in abbreviated form.
600 backtrack_masked.append((pkg, parent_atoms))

603 writemsg("\n!!! The following update has been skipped " + \
604 "due to unsatisfied dependencies:\n\n", noiselevel=-1)

606 writemsg(str(pkg.slot_atom), noiselevel=-1)
608 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
609 writemsg("\n", noiselevel=-1)

611 for parent, root, atom in parent_atoms:
612 self._show_unsatisfied_dep(root, atom, myparent=parent)
613 writemsg("\n", noiselevel=-1)

616 # These are shown in abbreviated form, in order to avoid terminal
617 # flooding from mask messages as reported in bug #285832.
618 writemsg("\n!!! The following update(s) have been skipped " + \
619 "due to unsatisfied dependencies\n" + \
620 "!!! triggered by backtracking:\n\n", noiselevel=-1)
621 for pkg, parent_atoms in backtrack_masked:
622 writemsg(str(pkg.slot_atom), noiselevel=-1)
624 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
625 writemsg("\n", noiselevel=-1)
# Render updates skipped because of slot conflicts, listing each
# conflicting (parent, atom) pair beneath the missed slot atom.
627 def _show_missed_update_slot_conflicts(self, missed_updates):

629 if not missed_updates:

632 self._show_merge_list()
# msg is accumulated and emitted once at the end (its initialization,
# original ~633, is missing from this dump).
634 msg.append("\nWARNING: One or more updates have been " + \
635 "skipped due to a dependency conflict:\n\n")

638 for pkg, parent_atoms in missed_updates:
639 msg.append(str(pkg.slot_atom))
641 msg.append(" for %s" % (pkg.root,))

644 for parent, atom in parent_atoms:
648 msg.append(" conflicts with\n")

650 if isinstance(parent,
651 (PackageArg, AtomArg)):
652 # For PackageArg and AtomArg types, it's
653 # redundant to display the atom attribute.
654 msg.append(str(parent))
656 # Display the specific atom from SetArg or
658 msg.append("%s required by %s" % (atom, parent))

662 writemsg("".join(msg), noiselevel=-1)
664 def _show_slot_collision_notice(self):
665 """Show an informational message advising the user to mask one of the
666 the packages. In some cases it may be possible to resolve this
667 automatically, but support for backtracking (removal nodes that have
668 already been selected) will be required in order to handle all possible
# NOTE(review): lossy dump — the docstring closer (~669-671) and several
# early-return bodies are missing from this listing.
672 if not self._dynamic_config._slot_collision_info:

675 self._show_merge_list()

# Delegate conflict formatting/explanation to slot_conflict_handler.
677 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
678 handler = self._dynamic_config._slot_conflict_handler

680 conflict = handler.get_conflict()
681 writemsg(conflict, noiselevel=-1)

683 explanation = handler.get_explanation()
685 writemsg(explanation, noiselevel=-1)

688 if "--quiet" in self._frozen_config.myopts:

# General advice when no automatic explanation was produced.
692 msg.append("It may be possible to solve this problem ")
693 msg.append("by using package.mask to prevent one of ")
694 msg.append("those packages from being selected. ")
695 msg.append("However, it is also possible that conflicting ")
696 msg.append("dependencies exist such that they are impossible to ")
697 msg.append("satisfy simultaneously. If such a conflict exists in ")
698 msg.append("the dependencies of two different packages, then those ")
699 msg.append("packages can not be installed simultaneously.")
700 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
# Suggest a larger --backtrack only when backtracking is currently
# off and the configured value is below the suggested 30.
701 if not self._dynamic_config._allow_backtracking and \
702 (backtrack_opt is None or \
703 (backtrack_opt > 0 and backtrack_opt < 30)):
704 msg.append(" You may want to try a larger value of the ")
705 msg.append("--backtrack option, such as --backtrack=30, ")
706 msg.append("in order to see if that will solve this conflict ")
707 msg.append("automatically.")

709 for line in textwrap.wrap(''.join(msg), 70):
710 writemsg(line + '\n', noiselevel=-1)
711 writemsg('\n', noiselevel=-1)

714 msg.append("For more information, see MASKED PACKAGES ")
715 msg.append("section in the emerge man page or refer ")
716 msg.append("to the Gentoo Handbook.")
717 for line in textwrap.wrap(''.join(msg), 70):
718 writemsg(line + '\n', noiselevel=-1)
719 writemsg('\n', noiselevel=-1)
721 def _process_slot_conflicts(self):
723 Process slot conflict data to identify specific atoms which
724 lead to conflict. These atoms only match a subset of the
725 packages that have been pulled into a given slot.
# (Docstring open/close quotes, original ~722/726, missing from dump.)
727 for (slot_atom, root), slot_nodes \
728 in self._dynamic_config._slot_collision_info.items():

# Gather every (parent, atom) pair that pulled in ANY package
# occupying this slot.
730 all_parent_atoms = set()
731 for pkg in slot_nodes:
732 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
735 all_parent_atoms.update(parent_atoms)

# For each slot occupant, record which of those parent atoms do
# NOT match it — those are the atoms driving the conflict.
737 for pkg in slot_nodes:
738 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
739 if parent_atoms is None:
741 self._dynamic_config._parent_atoms[pkg] = parent_atoms
742 for parent_atom in all_parent_atoms:
743 if parent_atom in parent_atoms:
745 # Use package set for matching since it will match via
746 # PROVIDE when necessary, while match_from_list does not.
747 parent, atom = parent_atom
748 atom_set = InternalPackageSet(
749 initial_atoms=(atom,), allow_repo=True)
750 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
751 parent_atoms.add(parent_atom)
# (else branch line missing from dump) non-matching atoms are
# recorded as slot-conflict parent atoms.
753 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
755 def _reinstall_for_flags(self, forced_flags,
756 orig_use, orig_iuse, cur_use, cur_iuse):
757 """Return a set of flags that trigger reinstallation, or None if there
758 are no such flags."""
# --newuse / --binpkg-respect-use: a flag matters if it appeared or
# disappeared from IUSE (minus forced flags), or if its enabled
# state changed among flags present in IUSE.
759 if "--newuse" in self._frozen_config.myopts or \
760 "--binpkg-respect-use" in self._frozen_config.myopts:
761 flags = set(orig_iuse.symmetric_difference(
762 cur_iuse).difference(forced_flags))
763 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
764 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only enabled-state changes count, not
# IUSE membership changes. (The return statements, original
# ~765-766 and 770-773, are missing from this dump.)
767 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
768 flags = orig_iuse.intersection(orig_use).symmetric_difference(
769 cur_iuse.intersection(cur_use))
# Main resolution loop: drain the dependency stack (and the disjunctive
# dep stack) until both are empty, expanding Package nodes via
# _add_pkg_deps and plain Dependency items via _add_dep.
774 def _create_graph(self, allow_unsatisfied=False):
775 dep_stack = self._dynamic_config._dep_stack
776 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
777 while dep_stack or dep_disjunctive_stack:
778 self._spinner_update()
# (The `if dep_stack:` guard and failure `return False` lines are
# missing from this dump.)
780 dep = dep_stack.pop()
781 if isinstance(dep, Package):
782 if not self._add_pkg_deps(dep,
783 allow_unsatisfied=allow_unsatisfied):
786 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
788 if dep_disjunctive_stack:
789 if not self._pop_disjunction(allow_unsatisfied):
793 def _expand_set_args(self, input_args, add_to_digraph=False):
795 Iterate over a list of DependencyArg instances and yield all
796 instances given in the input together with additional SetArg
797 instances that are generated from nested sets.
798 @param input_args: An iterable of DependencyArg instances
799 @type input_args: Iterable
800 @param add_to_digraph: If True then add SetArg instances
801 to the digraph, in order to record parent -> child
802 relationships from nested sets
803 @type add_to_digraph: Boolean
805 @returns: All args given in the input together with additional
806 SetArg instances that are generated from nested sets
# (Docstring quotes and @rtype line missing from this dump.)

809 traversed_set_args = set()

811 for arg in input_args:
# Non-SetArg args are passed through (the yield, original
# ~813-814, is missing from this dump).
812 if not isinstance(arg, SetArg):

816 root_config = arg.root_config
817 depgraph_sets = self._dynamic_config.sets[root_config.root]

# Depth-first traversal over nested sets via an explicit stack
# (its initialization and `while arg_stack:` header are missing
# from this dump); each set is visited at most once.
820 arg = arg_stack.pop()
821 if arg in traversed_set_args:
823 traversed_set_args.add(arg)

826 self._dynamic_config.digraph.add(arg, None,
827 priority=BlockerDepPriority.instance)

831 # Traverse nested sets and add them to the stack
832 # if they're not already in the graph. Also, graph
833 # edges between parent and nested sets.
834 for token in arg.pset.getNonAtoms():
835 if not token.startswith(SETPREFIX):
# Strip the set prefix ("@") to get the set name.
837 s = token[len(SETPREFIX):]
838 nested_set = depgraph_sets.sets.get(s)
839 if nested_set is None:
840 nested_set = root_config.sets.get(s)
841 if nested_set is not None:
842 nested_arg = SetArg(arg=token, pset=nested_set,
843 root_config=root_config)
844 arg_stack.append(nested_arg)
846 self._dynamic_config.digraph.add(nested_arg, arg,
847 priority=BlockerDepPriority.instance)
848 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# Resolve one Dependency: create Blocker nodes for blocker atoms,
# select a package for normal atoms, and on failure either queue the
# dep as unsatisfied, schedule a backtracking restart, or report it.
# NOTE(review): lossy dump — several guard/return/continue lines are
# missing throughout (the embedded numbering skips them).
850 def _add_dep(self, dep, allow_unsatisfied=False):
851 debug = "--debug" in self._frozen_config.myopts
852 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
853 nodeps = "--nodeps" in self._frozen_config.myopts
# "deep" may be True (unbounded) or an integer depth limit.
854 deep = self._dynamic_config.myparams.get("deep", 0)
855 recurse = deep is True or dep.depth <= deep
# Blocker handling (the `if dep.blocker:` test, original ~856/858,
# is missing from this dump).
857 if not buildpkgonly and \
859 not dep.collapsed_priority.ignored and \
860 not dep.collapsed_priority.optional and \
861 dep.parent not in self._dynamic_config._slot_collision_nodes:
862 if dep.parent.onlydeps:
863 # It's safe to ignore blockers if the
864 # parent is an --onlydeps node.
866 # The blocker applies to the root where
867 # the parent is or will be installed.
868 blocker = Blocker(atom=dep.atom,
869 eapi=dep.parent.metadata["EAPI"],
870 priority=dep.priority, root=dep.parent.root)
871 self._dynamic_config._blocker_parents.add(blocker, dep.parent)

874 if dep.child is None:
875 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
876 onlydeps=dep.onlydeps)
878 # The caller has selected a specific package
879 # via self._minimize_packages().
881 existing_node = self._dynamic_config._slot_pkg_map[
882 dep.root].get(dep_pkg.slot_atom)

# No package satisfied the atom (the `if not dep_pkg:` guard is
# missing from this dump): handle the unsatisfied dependency.
885 if (dep.collapsed_priority.optional or
886 dep.collapsed_priority.ignored):
887 # This is an unnecessary build-time dep.
889 if allow_unsatisfied:
890 self._dynamic_config._unsatisfied_deps.append(dep)

892 self._dynamic_config._unsatisfied_deps_for_display.append(
893 ((dep.root, dep.atom), {"myparent":dep.parent}))

895 # The parent node should not already be in
896 # runtime_pkg_mask, since that would trigger an
897 # infinite backtracking loop.
898 if self._dynamic_config._allow_backtracking:
899 if dep.parent in self._dynamic_config._runtime_pkg_mask:
900 if "--debug" in self._frozen_config.myopts:
902 "!!! backtracking loop detected: %s %s\n" % \
904 self._dynamic_config._runtime_pkg_mask[
905 dep.parent]), noiselevel=-1)
906 elif not self.need_restart():
907 # Do not backtrack if only USE have to be changed in
908 # order to satisfy the dependency.
909 dep_pkg, existing_node = \
910 self._select_package(dep.root, dep.atom.without_use,
911 onlydeps=dep.onlydeps)
# Atom is unsatisfiable even ignoring USE deps: record the
# missing dependency and request a backtracking restart.
913 self._dynamic_config._backtrack_infos["missing dependency"] = dep
914 self._dynamic_config._need_restart = True
915 if "--debug" in self._frozen_config.myopts:
919 msg.append("backtracking due to unsatisfied dep:")
920 msg.append(" parent: %s" % dep.parent)
921 msg.append(" priority: %s" % dep.priority)
922 msg.append(" root: %s" % dep.root)
923 msg.append(" atom: %s" % dep.atom)
925 writemsg_level("".join("%s\n" % l for l in msg),
926 noiselevel=-1, level=logging.DEBUG)

# Successful selection: record the build-dep edge for the
# --rebuild-if-* machinery, then recurse into the package unless
# this dep is ignorable and ignored deps aren't being traversed.
930 self._rebuild.add(dep_pkg, dep)

932 ignore = dep.collapsed_priority.ignored and \
933 not self._dynamic_config._traverse_ignored_deps
934 if not ignore and not self._add_pkg(dep_pkg, dep):
def _check_slot_conflict(self, pkg, atom):
# Check whether another package already occupies pkg's slot in the
# depgraph. Returns a tuple (existing_node, matches): existing_node is
# whatever package the graph currently maps to pkg's slot_atom, and
# matches indicates whether that package also satisfies 'atom'.
# NOTE(review): the visible extract accesses existing_node.cpv without
# a None guard — presumably an early-exit for the missing-node case was
# elided here; confirm against the full source.
existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
# An identical cpv is trivially a match.
matches = pkg.cpv == existing_node.cpv
# NOTE(review): this condition continues on a line not visible in this
# extract (likely "atom is not None") — confirm before editing.
if pkg != existing_node and \
# Use package set for matching since it will match via
# PROVIDE when necessary, while match_from_list does not.
matches = bool(InternalPackageSet(initial_atoms=(atom,),
allow_repo=True).findAtomForPackage(existing_node,
modified_use=self._pkg_use_enabled(existing_node)))
return (existing_node, matches)
def _add_pkg(self, pkg, dep):
# Add 'pkg' to the dependency graph on behalf of Dependency 'dep':
# record parent/child edges and parent atoms, check REQUIRED_USE,
# detect and record slot conflicts (possibly scheduling a backtrack),
# inject the package into the graph's fake dbapi, and queue it for
# dependency traversal. Return statements are not visible in this
# extract; presumably it returns a success flag like its callers test.
# NOTE(review): several lines (try/except heads, writemsg_level call
# openers) appear elided from this extract — confirm against the full
# source before restructuring.
Adds a package to the depgraph, queues dependencies, and handles
debug = "--debug" in self._frozen_config.myopts
myparent = dep.parent
priority = dep.priority
# Fall back to a default priority when none was supplied (the guarding
# condition for this assignment is not visible here).
priority = DepPriority()
"\n%s%s %s\n" % ("Child:".ljust(15), pkg,
pkg_use_display(pkg, self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(pkg))),
level=logging.DEBUG, noiselevel=-1)
if isinstance(myparent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
"%s%s\n" % ("Parent Dep:".ljust(15), myparent),
level=logging.DEBUG, noiselevel=-1)
# Display the specific atom from SetArg or
"%s%s required by %s\n" %
("Parent Dep:".ljust(15), dep.atom, myparent),
level=logging.DEBUG, noiselevel=-1)
# Ensure that the dependencies of the same package
# are never processed more than once.
previously_added = pkg in self._dynamic_config.digraph
# select the correct /var database that we'll be checking against
vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
arg_atoms = list(self._iter_atoms_for_pkg(pkg))
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
# NOTE: REQUIRED_USE checks are delayed until after
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
eapi_has_required_use(pkg.metadata["EAPI"]):
required_use_is_sat = check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
pkg.iuse.is_valid_flag)
if not required_use_is_sat:
# Record the parent atoms so that the unsatisfied
# REQUIRED_USE can be reported with its dependents.
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._add_parent_atom(pkg, parent_atom)
# Display the package itself (as an exact-version atom)
# in the unsatisfied-deps report.
atom = Atom("=" + pkg.cpv)
self._dynamic_config._unsatisfied_deps_for_display.append(
((pkg.root, atom), {"myparent":dep.parent}))
self._dynamic_config._skip_restart = True
# Slot-occupancy bookkeeping below only applies to packages that
# will actually be merged (--onlydeps nodes never occupy a slot).
if not pkg.onlydeps:
existing_node, existing_node_matches = \
self._check_slot_conflict(pkg, dep.atom)
slot_collision = False
if existing_node_matches:
# The existing node can be reused.
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._dynamic_config.digraph.add(existing_node, parent,
self._add_parent_atom(existing_node, parent_atom)
# If a direct circular dependency is not an unsatisfied
# buildtime dependency then drop it here since otherwise
# it can skew the merge order calculation in an unwanted
if existing_node != myparent or \
(priority.buildtime and not priority.satisfied):
self._dynamic_config.digraph.addnode(existing_node, myparent,
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(existing_node,
(dep.parent, dep.atom))
# A slot conflict has occurred.
# The existing node should not already be in
# runtime_pkg_mask, since that would trigger an
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking and \
self._dynamic_config._runtime_pkg_mask:
if "--debug" in self._frozen_config.myopts:
"!!! backtracking loop detected: %s %s\n" % \
self._dynamic_config._runtime_pkg_mask[
existing_node]), noiselevel=-1)
elif self._dynamic_config._allow_backtracking and \
not self._accept_blocker_conflicts() and \
not self.need_restart():
self._add_slot_conflict(pkg)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._add_parent_atom(pkg, parent_atom)
self._process_slot_conflicts()
# The ordering of backtrack_data can make
# a difference here, because both mask actions may lead
# to valid, but different, solutions and the one with
# 'existing_node' masked is usually the better one. Because
# of that, we choose an order such that
# the backtracker will first explore the choice with
# existing_node masked. The backtracker reverses the
# order, so the order it uses is the reverse of the
# order shown here. See bug #339606.
for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
# For missed update messages, find out which
# atoms matched to_be_selected that did not
# match to_be_masked.
self._dynamic_config._parent_atoms.get(to_be_selected, set())
conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
parent_atoms = conflict_atoms
all_parents.update(parent_atoms)
# Keep only the parent atoms that the masking
# candidate would violate.
for parent, atom in parent_atoms:
i = InternalPackageSet(initial_atoms=(atom,),
if not i.findAtomForPackage(to_be_masked):
if to_be_selected >= to_be_masked:
# We only care about the parent atoms
# when they trigger a downgrade.
parent_atoms = set()
fallback_data.append((to_be_masked, parent_atoms))
# 'to_be_masked' does not violate any parent atom, which means
# there is no point in masking it.
backtrack_data.append((to_be_masked, parent_atoms))
if not backtrack_data:
# This shouldn't happen, but fall back to the old
# behavior if this gets triggered somehow.
backtrack_data = fallback_data
if len(backtrack_data) > 1:
# NOTE: Generally, we prefer to mask the higher
# version since this solves common cases in which a
# lower version is needed so that all dependencies
# will be satisfied (bug #337178). However, if
# existing_node happens to be installed then we
# mask that since this is a common case that is
# triggered when --update is not enabled.
if existing_node.installed:
elif pkg > existing_node:
backtrack_data.reverse()
to_be_masked = backtrack_data[-1][0]
self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
self._dynamic_config._need_restart = True
if "--debug" in self._frozen_config.myopts:
msg.append("backtracking due to slot conflict:")
if backtrack_data is fallback_data:
msg.append("!!! backtrack_data fallback")
msg.append("   first package:  %s" % existing_node)
msg.append("   second package: %s" % pkg)
msg.append("  package to mask: %s" % to_be_masked)
msg.append("      slot: %s" % pkg.slot_atom)
msg.append("   parents: %s" % ", ".join( \
"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
writemsg_level("".join("%s\n" % l for l in msg),
noiselevel=-1, level=logging.DEBUG)
# A slot collision has occurred.  Sometimes this coincides
# with unresolvable blockers, so the slot collision will be
# shown later if there are no unresolvable blockers.
self._add_slot_conflict(pkg)
slot_collision = True
"%s%s %s\n" % ("Slot Conflict:".ljust(15),
existing_node, pkg_use_display(existing_node,
self._frozen_config.myopts,
modified_use=self._pkg_use_enabled(existing_node))),
level=logging.DEBUG, noiselevel=-1)
# Now add this node to the graph so that self.display()
# can show use flags and --tree portage.output.  This node is
# only being partially added to the graph.  It must not be
# allowed to interfere with the other nodes that have been
# added.  Do not overwrite data for existing nodes in
# self._dynamic_config.mydbapi since that data will be used for blocker
# Even though the graph is now invalid, continue to process
# dependencies so that things like --fetchonly can still
# function despite collisions.
elif not previously_added:
# First time this package is seen: claim its slot, inject it
# into the graph's fake dbapi, and drop now-stale caches.
self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
self._dynamic_config._highest_pkg_cache.clear()
self._check_masks(pkg)
if not pkg.installed:
# Allow this package to satisfy old-style virtuals in case it
# doesn't already. Any pre-existing providers will be preferred
pkgsettings.setinst(pkg.cpv, pkg.metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.setinst(pkg.cpv, pkg.metadata)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
self._dynamic_config._set_nodes.add(pkg)
# Do this even when addme is False (--onlydeps) so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
for parent_atom in arg_atoms:
parent, atom = parent_atom
self._dynamic_config.digraph.add(pkg, parent, priority=priority)
self._add_parent_atom(pkg, parent_atom)
""" This section determines whether we go deeper into dependencies or not.
We want to go deeper on a few occasions:
Installing package A, we need to make sure package A's deps are met.
emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
"""
# deep may be True (unbounded) or an integer depth limit.
deep = self._dynamic_config.myparams.get("deep", 0)
recurse = deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
if "recurse" not in self._dynamic_config.myparams:
elif pkg.installed and not recurse:
# Installed packages past the depth limit are queued on the
# ignored-deps stack instead of the main traversal stack.
dep_stack = self._dynamic_config._ignored_deps
self._spinner_update()
if not previously_added:
dep_stack.append(pkg)
1266 def _check_masks(self, pkg):
1268 slot_key = (pkg.root, pkg.slot_atom)
1270 # Check for upgrades in the same slot that are
1271 # masked due to a LICENSE change in a newer
1272 # version that is not masked for any other reason.
1273 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1274 if other_pkg is not None and pkg < other_pkg:
1275 self._dynamic_config._masked_license_updates.add(other_pkg)
1277 def _add_parent_atom(self, pkg, parent_atom):
1278 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1279 if parent_atoms is None:
1280 parent_atoms = set()
1281 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1282 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
# Register pkg as a slot-collision node and record both it and the
# package currently occupying its slot under the (slot_atom, root) key
# in _slot_collision_info.
self._dynamic_config._slot_collision_nodes.add(pkg)
slot_key = (pkg.slot_atom, pkg.root)
slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
# NOTE(review): the branch body initializing slot_nodes (presumably
# "slot_nodes = set()") is not visible in this extract — confirm
# against the full source.
if slot_nodes is None:
slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
# Collect pkg's DEPEND/RDEPEND/PDEPEND strings, decide which dep
# classes to traverse (honoring --buildpkgonly, --with-bdeps,
# --root-deps and removal actions), reduce them by USE, and feed each
# through _add_pkg_dep_string. Marks pkg in _traversed_pkg_deps when
# done. Return statements are not visible in this extract.
# NOTE(review): initializations (e.g. of edepend and the deps tuple)
# and try: headers appear elided here — confirm against full source.
mytype = pkg.type_name
metadata = pkg.metadata
myuse = self._pkg_use_enabled(pkg)
depth = pkg.depth + 1
removal_action = "remove" in self._dynamic_config.myparams
depkeys = ["DEPEND","RDEPEND","PDEPEND"]
edepend[k] = metadata[k]
# With --buildpkgonly and no deep traversal, runtime deps of
# not-yet-built packages are irrelevant.
if not pkg.built and \
"--buildpkgonly" in self._frozen_config.myopts and \
"deep" not in self._dynamic_config.myparams:
edepend["RDEPEND"] = ""
edepend["PDEPEND"] = ""
ignore_build_time_deps = False
if pkg.built and not removal_action:
if self._dynamic_config.myparams.get("bdeps", "n") == "y":
# Pull in build time deps as requested, but marked them as
# "optional" since they are not strictly required. This allows
# more freedom in the merge order calculation for solving
# circular dependencies. Don't convert to PDEPEND since that
# could make --with-bdeps=y less effective if it is used to
# adjust merge order to prevent built_with_use() calls from
ignore_build_time_deps = True
if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
ignore_build_time_deps = True
depend_root = myroot
# --root-deps can redirect build-time deps to the target root
# or (with "rdeps") drop them entirely.
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
depend_root = myroot
elif root_deps == "rdeps":
ignore_build_time_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
if ignore_build_time_deps and \
not self._rebuild.rebuild:
edepend["DEPEND"] = ""
(depend_root, edepend["DEPEND"],
self._priority(buildtime=True,
optional=(pkg.built or ignore_build_time_deps),
ignored=ignore_build_time_deps)),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True)),
(myroot, edepend["PDEPEND"],
self._priority(runtime_post=True))
debug = "--debug" in self._frozen_config.myopts
# Installed packages get lenient dep-string parsing.
strict = mytype != "installed"
for dep_root, dep_string, dep_priority in deps:
writemsg_level("\nParent:    %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority:  %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
# Try again, but omit the is_valid_flag argument, since
# invalid USE conditionals are a common problem and it's
# practical to ignore this issue for installed packages.
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg))
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
# Defer disjunctive (|| and virtual) deps; keep the rest.
dep_string = list(self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, dep_string))
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
# should have been masked before it was selected
dep_string = portage.dep.paren_enclose(dep_string,
unevaluated_atom=True)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string,
self._dynamic_config._traversed_pkg_deps.add(pkg)
# Wrapper around _wrapped_add_pkg_dep_string that temporarily disables
# autounmask for optional/ignored dep priorities, restoring the previous
# setting afterwards.
# NOTE(review): the allow_unsatisfied parameter continuation and the
# try/finally lines appear elided from this extract — confirm against
# the full source.
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
_autounmask_backup = self._dynamic_config._autounmask
if dep_priority.optional or dep_priority.ignored:
# Temporarily disable autounmask for deps that
# don't necessarily need to be satisfied.
self._dynamic_config._autounmask = False
return self._wrapped_add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string,
# Restore the saved autounmask state.
self._dynamic_config._autounmask = _autounmask_backup
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
dep_string, allow_unsatisfied):
# Resolve pkg's dep_string into concrete atoms via _select_atoms,
# build Dependency objects for each selected (atom, child) pair, and
# feed them to _add_dep. Indirect virtual deps selected by dep_check
# are then added with preserved parent/child relationships. Return
# statements are not visible in this extract.
# NOTE(review): try: headers and several branch bodies appear elided
# here — confirm against the full source before restructuring.
depth = pkg.depth + 1
deep = self._dynamic_config.myparams.get("deep", 0)
# Whether already-satisfied deps should still be traversed at this depth.
recurse_satisfied = deep is True or depth <= deep
debug = "--debug" in self._frozen_config.myopts
strict = pkg.type_name != "installed"
writemsg_level("\nParent:    %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Depstring: %s\n" % (dep_string,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority:  %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
selected_atoms = self._select_atoms(dep_root,
dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
strict=strict, priority=dep_priority)
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
# should have been masked before it was selected
writemsg_level("Candidates: %s\n" % \
([str(x) for x in selected_atoms[pkg]],),
noiselevel=-1, level=logging.DEBUG)
root_config = self._frozen_config.roots[dep_root]
vardb = root_config.trees["vartree"].dbapi
traversed_virt_pkgs = set()
reinstall_atoms = self._frozen_config.reinstall_atoms
for atom, child in self._minimize_children(
pkg, dep_priority, root_config, selected_atoms[pkg]):
# If this was a specially generated virtual atom
# from dep_check, map it back to the original, in
# order to avoid distortion in places like display
# or conflict resolution code.
is_virt = hasattr(atom, '_orig_atom')
atom = getattr(atom, '_orig_atom', atom)
if atom.blocker and \
(dep_priority.optional or dep_priority.ignored):
# For --with-bdeps, ignore build-time only blockers
# that originate from built packages.
mypriority = dep_priority.copy()
if not atom.blocker:
# Installed packages matching the atom, excluding those
# scheduled for reinstall; vardb order is reversed so the
# highest version comes first.
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
modified_use=self._pkg_use_enabled(inst_pkg))]
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
mypriority.satisfied = inst_pkg
if not mypriority.satisfied:
# none visible, so use highest
mypriority.satisfied = inst_pkgs[0]
dep = Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=depth, parent=pkg,
priority=mypriority, root=dep_root)
# In some cases, dep_check will return deps that shouldn't
# be proccessed any further, so they are identified and
# discarded here. Try to discard as few as possible since
# discarded dependencies reduce the amount of information
# available for optimization of merge order.
if not atom.blocker and \
not recurse_satisfied and \
mypriority.satisfied and \
mypriority.satisfied.visible and \
dep.child is not None and \
not dep.child.installed and \
self._dynamic_config._slot_pkg_map[dep.child.root].get(
dep.child.slot_atom) is None:
if dep.root == self._frozen_config.target_root:
myarg = next(self._iter_atoms_for_pkg(dep.child))
except StopIteration:
except InvalidDependString:
if not dep.child.installed:
# This shouldn't happen since the package
# should have been masked.
# Existing child selection may not be valid unless
# it's added to the graph immediately, since "complete"
# mode may select a different child later.
self._dynamic_config._ignored_deps.append(dep)
if dep_priority.ignored and \
not self._dynamic_config._traverse_ignored_deps:
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
self._dynamic_config._ignored_deps.append(dep)
if not self._add_dep(dep,
allow_unsatisfied=allow_unsatisfied):
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
# pkg's own entry has been processed; what remains in
# selected_atoms are the indirect virtual deps.
selected_atoms.pop(pkg)
# Add selected indirect virtual deps to the graph. This
# takes advantage of circular dependency avoidance that's done
# by dep_zapdeps. We preserve actual parent/child relationships
# here in order to avoid distorting the dependency graph like
# <=portage-2.1.6.x did.
for virt_dep, atoms in selected_atoms.items():
virt_pkg = virt_dep.child
if virt_pkg not in traversed_virt_pkgs:
writemsg_level("\nCandidates: %s: %s\n" % \
(virt_pkg.cpv, [str(x) for x in atoms]),
noiselevel=-1, level=logging.DEBUG)
if not dep_priority.ignored or \
self._dynamic_config._traverse_ignored_deps:
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(virt_dep.atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
modified_use=self._pkg_use_enabled(inst_pkg))]
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
virt_dep.priority.satisfied = inst_pkg
if not virt_dep.priority.satisfied:
# none visible, so use highest
virt_dep.priority.satisfied = inst_pkgs[0]
if not self._add_pkg(virt_pkg, virt_dep):
for atom, child in self._minimize_children(
pkg, self._priority(runtime=True), root_config, atoms):
# If this was a specially generated virtual atom
# from dep_check, map it back to the original, in
# order to avoid distortion in places like display
# or conflict resolution code.
is_virt = hasattr(atom, '_orig_atom')
atom = getattr(atom, '_orig_atom', atom)
# This is a GLEP 37 virtual, so its deps are all runtime.
mypriority = self._priority(runtime=True)
if not atom.blocker:
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
modified_use=self._pkg_use_enabled(inst_pkg))]
for inst_pkg in inst_pkgs:
if self._pkg_visibility_check(inst_pkg):
mypriority.satisfied = inst_pkg
if not mypriority.satisfied:
# none visible, so use highest
mypriority.satisfied = inst_pkgs[0]
# Dependencies of virtuals are considered to have the
# same depth as the virtual itself.
dep = Dependency(atom=atom,
blocker=atom.blocker, child=child, depth=virt_dep.depth,
parent=virt_pkg, priority=mypriority, root=dep_root,
collapsed_parent=pkg, collapsed_priority=dep_priority)
if not atom.blocker and \
not recurse_satisfied and \
mypriority.satisfied and \
mypriority.satisfied.visible and \
dep.child is not None and \
not dep.child.installed and \
self._dynamic_config._slot_pkg_map[dep.child.root].get(
dep.child.slot_atom) is None:
if dep.root == self._frozen_config.target_root:
myarg = next(self._iter_atoms_for_pkg(dep.child))
except StopIteration:
except InvalidDependString:
if not dep.child.installed:
self._dynamic_config._ignored_deps.append(dep)
if dep_priority.ignored and \
not self._dynamic_config._traverse_ignored_deps:
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
self._dynamic_config._ignored_deps.append(dep)
if not self._add_dep(dep,
allow_unsatisfied=allow_unsatisfied):
if is_virt and dep.child is not None:
traversed_virt_pkgs.add(dep.child)
writemsg_level("\nExiting... %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
# Generator: select a package for each atom, then use a bipartite
# atom<->package digraph to drop packages whose atoms are all also
# satisfiable by another selected package. Yields (atom, package)
# pairs, conflict-prone atoms first.
# NOTE(review): several initializations (atom_pkg_map, pkg_atom_map,
# cp_pkg_map, conflict_atoms, normal_atoms, cp_atoms, loop heads such
# as the outer atom loop and yields) appear elided from this extract —
# confirm against the full source.
Selects packages to satisfy the given atoms, and minimizes the
number of selected packages. This serves to identify and eliminate
redundant package selections when multiple atoms happen to specify
dep_pkg, existing_node = self._select_package(
root_config.root, atom)
atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
if len(atom_pkg_map) < 2:
for item in atom_pkg_map.items():
for atom, pkg in atom_pkg_map.items():
pkg_atom_map.setdefault(pkg, set()).add(atom)
cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
for cp, pkgs in cp_pkg_map.items():
for atom in pkg_atom_map[pkg]:
# Use a digraph to identify and eliminate any
# redundant package selections.
atom_pkg_graph = digraph()
for atom in pkg_atom_map[pkg1]:
atom_pkg_graph.add(pkg1, atom)
atom_set = InternalPackageSet(initial_atoms=(atom,),
if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
atom_pkg_graph.add(pkg2, atom)
# A package may be dropped only if every atom pointing at it
# is also satisfied by some other package.
eliminate_pkg = True
for atom in atom_pkg_graph.parent_nodes(pkg):
if len(atom_pkg_graph.child_nodes(atom)) < 2:
eliminate_pkg = False
atom_pkg_graph.remove(pkg)
# Yield ~, =*, < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
for atom in cp_atoms:
for child_pkg in atom_pkg_graph.child_nodes(atom):
existing_node, matches = \
self._check_slot_conflict(child_pkg, atom)
if existing_node and not matches:
conflict_atoms.append(atom)
normal_atoms.append(atom)
for atom in chain(conflict_atoms, normal_atoms):
child_pkgs = atom_pkg_graph.child_nodes(atom)
# if more than one child, yield highest version
if len(child_pkgs) > 1:
yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
# Generator that walks a use_reduce()-style dependency structure,
# deferring disjunctive parts (|| groups and virtual/* atoms) onto
# _dep_disjunctive_stack via _queue_disjunction, and yields everything
# else for immediate processing.
# NOTE(review): the index initialization/increments and yield
# statements appear elided from this extract — confirm against the
# full source.
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
Yields non-disjunctive deps. Raises InvalidDependString when
while i < len(dep_struct):
if isinstance(x, list):
# Nested lists are processed recursively.
for y in self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, x):
# A "||" token: defer it together with the choice list
# that follows it.
self._queue_disjunction(pkg, dep_root, dep_priority,
[ x, dep_struct[ i + 1 ] ] )
x = portage.dep.Atom(x)
except portage.exception.InvalidAtom:
# Installed packages tolerate invalid atoms; others do not.
if not pkg.installed:
raise portage.exception.InvalidDependString(
"invalid atom: '%s'" % x)
# Note: Eventually this will check for PROPERTIES=virtual
# or whatever other metadata gets implemented for this
if x.cp.startswith('virtual/'):
self._queue_disjunction( pkg, dep_root,
dep_priority, [ str(x) ] )
1801 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1802 self._dynamic_config._dep_disjunctive_stack.append(
1803 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
# NOTE(review): the return statements of this method appear elided
# from this extract — confirm against the full source.
Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
populate self._dynamic_config._dep_stack.
pkg, dep_root, dep_priority, dep_struct = \
self._dynamic_config._dep_disjunctive_stack.pop()
# Re-serialize the structure so _add_pkg_dep_string can parse it.
dep_string = portage.dep.paren_enclose(dep_struct,
unevaluated_atom=True)
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
	"""Construct a dep priority, using the unmerge variant during removal.

	Keyword arguments are forwarded unchanged to the selected priority
	class (UnmergeDepPriority for "remove" actions, DepPriority otherwise).
	"""
	if "remove" in self._dynamic_config.myparams:
		return UnmergeDepPriority(**kwargs)
	return DepPriority(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
# Expand a category-less atom into a list of full atoms by probing
# every known category in every configured package database.
# NOTE(review): the initialization of 'categories'/'deps' and the
# return statement appear elided from this extract — confirm against
# the full source.
@param root_config: a root config instance
@type root_config: RootConfig
@param atom_without_category: an atom without a category component
@type atom_without_category: String
@returns: a list of atoms containing categories (possibly empty)
# Insert a placeholder category so the package-name part can be split out.
null_cp = portage.dep_getkey(insert_category_into_atom(
atom_without_category, "null"))
cat, atom_pn = portage.catsplit(null_cp)
dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
for db, pkg_type, built, installed, db_keys in dbs:
for cat in db.categories:
if db.cp_list("%s/%s" % (cat, atom_pn)):
for cat in categories:
deps.append(Atom(insert_category_into_atom(
atom_without_category, cat), allow_repo=True))
def _have_new_virt(self, root, atom_cp):
# Whether atom_cp exists as a new-style virtual (a real package) in
# any of root's configured package databases.
# NOTE(review): the result initialization and return statement appear
# elided from this extract — confirm against the full source.
for db, pkg_type, built, installed, db_keys in \
self._dynamic_config._filtered_trees[root]["dbs"]:
if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
# Generator over the argument atoms (from the command line / sets)
# that match pkg, skipping old-style virtual atoms shadowed by a
# new-style virtual and atoms better satisfied by a higher-slot
# visible package.
# NOTE(review): several lines (the visible_pkgs assignment head,
# higher_slot initialization, yields) appear elided from this
# extract — confirm against the full source.
depgraph_sets = self._dynamic_config.sets[pkg.root]
atom_arg_map = depgraph_sets.atom_arg_map
root_config = self._frozen_config.roots[pkg.root]
for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
if atom.cp != pkg.cp and \
self._have_new_virt(pkg.root, atom.cp):
self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
visible_pkgs.reverse() # descending order
for visible_pkg in visible_pkgs:
if visible_pkg.cp != atom.cp:
if pkg >= visible_pkg:
# This is descending order, and we're not
# interested in any versions <= pkg given.
if pkg.slot_atom != visible_pkg.slot_atom:
higher_slot = visible_pkg
if higher_slot is not None:
for arg in atom_arg_map[(atom, pkg.root)]:
if isinstance(arg, PackageArg) and \
1891 def select_files(self, myfiles):
1892 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1893 self._dynamic_config._initial_arg_list and call self._resolve to create the
1894 appropriate depgraph and return a favorite list."""
1896 debug = "--debug" in self._frozen_config.myopts
1897 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1898 sets = root_config.sets
1899 depgraph_sets = self._dynamic_config.sets[root_config.root]
1901 myroot = self._frozen_config.target_root
1902 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1903 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1904 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1905 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1906 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1907 pkgsettings = self._frozen_config.pkgsettings[myroot]
1909 onlydeps = "--onlydeps" in self._frozen_config.myopts
1912 ext = os.path.splitext(x)[1]
1914 if not os.path.exists(x):
1916 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1917 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1918 elif os.path.exists(
1919 os.path.join(pkgsettings["PKGDIR"], x)):
1920 x = os.path.join(pkgsettings["PKGDIR"], x)
1922 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
1923 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1924 return 0, myfavorites
1925 mytbz2=portage.xpak.tbz2(x)
1926 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1927 if os.path.realpath(x) != \
1928 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1929 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
1930 self._dynamic_config._skip_restart = True
1931 return 0, myfavorites
1933 pkg = self._pkg(mykey, "binary", root_config,
1935 args.append(PackageArg(arg=x, package=pkg,
1936 root_config=root_config))
1937 elif ext==".ebuild":
1938 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1939 pkgdir = os.path.dirname(ebuild_path)
1940 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1941 cp = pkgdir[len(tree_root)+1:]
1942 e = portage.exception.PackageNotFound(
1943 ("%s is not in a valid portage tree " + \
1944 "hierarchy or does not exist") % x)
1945 if not portage.isvalidatom(cp):
1947 cat = portage.catsplit(cp)[0]
1948 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1949 if not portage.isvalidatom("="+mykey):
1951 ebuild_path = portdb.findname(mykey)
1953 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1954 cp, os.path.basename(ebuild_path)):
1955 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
1956 self._dynamic_config._skip_restart = True
1957 return 0, myfavorites
1958 if mykey not in portdb.xmatch(
1959 "match-visible", portage.cpv_getkey(mykey)):
1960 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
1961 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
1962 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
1963 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1966 raise portage.exception.PackageNotFound(
1967 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1968 pkg = self._pkg(mykey, "ebuild", root_config,
1969 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
1970 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
1971 args.append(PackageArg(arg=x, package=pkg,
1972 root_config=root_config))
1973 elif x.startswith(os.path.sep):
1974 if not x.startswith(myroot):
1975 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1976 " $ROOT.\n") % x, noiselevel=-1)
1977 self._dynamic_config._skip_restart = True
1979 # Queue these up since it's most efficient to handle
1980 # multiple files in a single iter_owners() call.
1981 lookup_owners.append(x)
1982 elif x.startswith("." + os.sep) or \
1983 x.startswith(".." + os.sep):
1984 f = os.path.abspath(x)
1985 if not f.startswith(myroot):
1986 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
1987 " $ROOT.\n") % (f, x), noiselevel=-1)
1988 self._dynamic_config._skip_restart = True
1990 lookup_owners.append(f)
1992 if x in ("system", "world"):
1994 if x.startswith(SETPREFIX):
1995 s = x[len(SETPREFIX):]
1997 raise portage.exception.PackageSetNotFound(s)
1998 if s in depgraph_sets.sets:
2001 depgraph_sets.sets[s] = pset
2002 args.append(SetArg(arg=x, pset=pset,
2003 root_config=root_config))
2005 if not is_valid_package_atom(x, allow_repo=True):
2006 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2008 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2009 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2010 self._dynamic_config._skip_restart = True
2012 # Don't expand categories or old-style virtuals here unless
2013 # necessary. Expansion of old-style virtuals here causes at
2014 # least the following problems:
2015 # 1) It's more difficult to determine which set(s) an atom
2016 # came from, if any.
2017 # 2) It takes away freedom from the resolver to choose other
2018 # possible expansions when necessary.
2020 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2021 root_config=root_config))
2023 expanded_atoms = self._dep_expand(root_config, x)
2024 installed_cp_set = set()
2025 for atom in expanded_atoms:
2026 if vardb.cp_list(atom.cp):
2027 installed_cp_set.add(atom.cp)
2029 if len(installed_cp_set) > 1:
2030 non_virtual_cps = set()
2031 for atom_cp in installed_cp_set:
2032 if not atom_cp.startswith("virtual/"):
2033 non_virtual_cps.add(atom_cp)
2034 if len(non_virtual_cps) == 1:
2035 installed_cp_set = non_virtual_cps
2037 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2038 installed_cp = next(iter(installed_cp_set))
2039 for atom in expanded_atoms:
2040 if atom.cp == installed_cp:
2042 for pkg in self._iter_match_pkgs_any(
2043 root_config, atom.without_use,
2045 if not pkg.installed:
2049 expanded_atoms = [atom]
2052 # If a non-virtual package and one or more virtual packages
2053 # are in expanded_atoms, use the non-virtual package.
2054 if len(expanded_atoms) > 1:
2055 number_of_virtuals = 0
2056 for expanded_atom in expanded_atoms:
2057 if expanded_atom.cp.startswith("virtual/"):
2058 number_of_virtuals += 1
2060 candidate = expanded_atom
2061 if len(expanded_atoms) - number_of_virtuals == 1:
2062 expanded_atoms = [ candidate ]
2064 if len(expanded_atoms) > 1:
2065 writemsg("\n\n", noiselevel=-1)
2066 ambiguous_package_name(x, expanded_atoms, root_config,
2067 self._frozen_config.spinner, self._frozen_config.myopts)
2068 self._dynamic_config._skip_restart = True
2069 return False, myfavorites
2071 atom = expanded_atoms[0]
2073 null_atom = Atom(insert_category_into_atom(x, "null"),
2075 cat, atom_pn = portage.catsplit(null_atom.cp)
2076 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2078 # Allow the depgraph to choose which virtual.
2079 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2084 if atom.use and atom.use.conditional:
2086 ("\n\n!!! '%s' contains a conditional " + \
2087 "which is not allowed.\n") % (x,), noiselevel=-1)
2088 writemsg("!!! Please check ebuild(5) for full details.\n")
2089 self._dynamic_config._skip_restart = True
2092 args.append(AtomArg(arg=x, atom=atom,
2093 root_config=root_config))
2097 search_for_multiple = False
2098 if len(lookup_owners) > 1:
2099 search_for_multiple = True
2101 for x in lookup_owners:
2102 if not search_for_multiple and os.path.isdir(x):
2103 search_for_multiple = True
2104 relative_paths.append(x[len(myroot)-1:])
2107 for pkg, relative_path in \
2108 real_vardb._owners.iter_owners(relative_paths):
2109 owners.add(pkg.mycpv)
2110 if not search_for_multiple:
2114 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2115 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2116 self._dynamic_config._skip_restart = True
2120 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2122 # portage now masks packages with missing slot, but it's
2123 # possible that one was installed by an older version
2124 atom = Atom(portage.cpv_getkey(cpv))
2126 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2127 args.append(AtomArg(arg=atom, atom=atom,
2128 root_config=root_config))
2130 if "--update" in self._frozen_config.myopts:
2131 # In some cases, the greedy slots behavior can pull in a slot that
2132 # the user would want to uninstall due to it being blocked by a
2133 # newer version in a different slot. Therefore, it's necessary to
2134 # detect and discard any that should be uninstalled. Each time
2135 # that arguments are updated, package selections are repeated in
2136 # order to ensure consistency with the current arguments:
2138 # 1) Initialize args
2139 # 2) Select packages and generate initial greedy atoms
2140 # 3) Update args with greedy atoms
2141 # 4) Select packages and generate greedy atoms again, while
2142 # accounting for any blockers between selected packages
2143 # 5) Update args with revised greedy atoms
2145 self._set_args(args)
2148 greedy_args.append(arg)
2149 if not isinstance(arg, AtomArg):
2151 for atom in self._greedy_slots(arg.root_config, arg.atom):
2153 AtomArg(arg=arg.arg, atom=atom,
2154 root_config=arg.root_config))
2156 self._set_args(greedy_args)
2159 # Revise greedy atoms, accounting for any blockers
2160 # between selected packages.
2161 revised_greedy_args = []
2163 revised_greedy_args.append(arg)
2164 if not isinstance(arg, AtomArg):
2166 for atom in self._greedy_slots(arg.root_config, arg.atom,
2167 blocker_lookahead=True):
2168 revised_greedy_args.append(
2169 AtomArg(arg=arg.arg, atom=atom,
2170 root_config=arg.root_config))
2171 args = revised_greedy_args
2172 del revised_greedy_args
2174 self._set_args(args)
2176 myfavorites = set(myfavorites)
2178 if isinstance(arg, (AtomArg, PackageArg)):
2179 myfavorites.add(arg.atom)
2180 elif isinstance(arg, SetArg):
2181 myfavorites.add(arg.arg)
2182 myfavorites = list(myfavorites)
2185 portage.writemsg("\n", noiselevel=-1)
2186 # Order needs to be preserved since a feature of --nodeps
2187 # is to allow the user to force a specific merge order.
2188 self._dynamic_config._initial_arg_list = args[:]
2190 return self._resolve(myfavorites)
2192 def _resolve(self, myfavorites):
# Purpose: seed the dependency graph with root nodes derived from
# self._dynamic_config._initial_arg_list (plus any rebuild/reinstall
# atoms), then run _create_graph() to pull in their dependencies.
# Returns a 2-tuple (success, myfavorites); the visible failure paths
# return 0 or False as the first element.
# NOTE(review): this view is line-sampled (gaps in the embedded line
# numbering) -- the docstring terminator and several try/else/continue
# lines are not visible here; comments describe only visible lines.
2193 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2194 call self._creategraph to process theier deps and return
2196 debug = "--debug" in self._frozen_config.myopts
2197 onlydeps = "--onlydeps" in self._frozen_config.myopts
2198 myroot = self._frozen_config.target_root
2199 pkgsettings = self._frozen_config.pkgsettings[myroot]
2200 pprovideddict = pkgsettings.pprovideddict
2201 virtuals = pkgsettings.getvirtuals()
2202 args = self._dynamic_config._initial_arg_list[:]
# Atoms scheduled for rebuild/reinstall are injected as extra AtomArg
# instances so they get resolved exactly like user arguments.
2203 for root, atom in chain(self._rebuild.rebuild_list,
2204 self._rebuild.reinstall_list):
2205 args.append(AtomArg(arg=atom, atom=atom,
2206 root_config=self._frozen_config.roots[root]))
2207 for arg in self._expand_set_args(args, add_to_digraph=True):
2208 for atom in arg.pset.getAtoms():
2209 self._spinner_update()
2210 dep = Dependency(atom=atom, onlydeps=onlydeps,
2211 root=myroot, parent=arg)
# Atoms satisfied by package.provided are recorded for later
# display instead of being resolved.
2213 pprovided = pprovideddict.get(atom.cp)
2214 if pprovided and portage.match_from_list(atom, pprovided):
2215 # A provided package has been specified on the command line.
2216 self._dynamic_config._pprovided_args.append((arg, atom))
2218 if isinstance(arg, PackageArg):
# PackageArg (a concrete tbz2/ebuild file): add the package
# directly and build its graph immediately.
2219 if not self._add_pkg(arg.package, dep) or \
2220 not self._create_graph():
2221 if not self.need_restart():
2222 sys.stderr.write(("\n\n!!! Problem " + \
2223 "resolving dependencies for %s\n") % \
2225 return 0, myfavorites
2228 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2229 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2230 pkg, existing_node = self._select_package(
2231 myroot, atom, onlydeps=onlydeps)
# No package selected: check whether an old-style virtual
# expansion of the atom is satisfied by package.provided
# before reporting the atom as unsatisfied.
2233 pprovided_match = False
2234 for virt_choice in virtuals.get(atom.cp, []):
2235 expanded_atom = portage.dep.Atom(
2236 atom.replace(atom.cp, virt_choice.cp, 1))
2237 pprovided = pprovideddict.get(expanded_atom.cp)
2239 portage.match_from_list(expanded_atom, pprovided):
2240 # A provided package has been
2241 # specified on the command line.
2242 self._dynamic_config._pprovided_args.append((arg, atom))
2243 pprovided_match = True
# Unsatisfied atoms coming from the selected/system/world sets
# are tolerated (recorded in _missing_args); other unsatisfied
# atoms abort the resolve.
2248 if not (isinstance(arg, SetArg) and \
2249 arg.name in ("selected", "system", "world")):
2250 self._dynamic_config._unsatisfied_deps_for_display.append(
2251 ((myroot, atom), {"myparent" : arg}))
2252 return 0, myfavorites
2254 self._dynamic_config._missing_args.append((arg, atom))
2256 if atom.cp != pkg.cp:
2257 # For old-style virtuals, we need to repeat the
2258 # package.provided check against the selected package.
2259 expanded_atom = atom.replace(atom.cp, pkg.cp)
2260 pprovided = pprovideddict.get(pkg.cp)
2262 portage.match_from_list(expanded_atom, pprovided):
2263 # A provided package has been
2264 # specified on the command line.
2265 self._dynamic_config._pprovided_args.append((arg, atom))
2267 if pkg.installed and \
2268 "selective" not in self._dynamic_config.myparams and \
2269 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2270 pkg, modified_use=self._pkg_use_enabled(pkg)):
2271 self._dynamic_config._unsatisfied_deps_for_display.append(
2272 ((myroot, atom), {"myparent" : arg}))
2273 # Previous behavior was to bail out in this case, but
2274 # since the dep is satisfied by the installed package,
2275 # it's more friendly to continue building the graph
2276 # and just show a warning message. Therefore, only bail
2277 # out here if the atom is not from either the system or
2279 if not (isinstance(arg, SetArg) and \
2280 arg.name in ("selected", "system", "world")):
2281 return 0, myfavorites
2283 # Add the selected package to the graph as soon as possible
2284 # so that later dep_check() calls can use it as feedback
2285 # for making more consistent atom selections.
2286 if not self._add_pkg(pkg, dep):
2287 if self.need_restart():
2289 elif isinstance(arg, SetArg):
2290 writemsg(("\n\n!!! Problem resolving " + \
2291 "dependencies for %s from %s\n") % \
2292 (atom, arg.arg), noiselevel=-1)
2294 writemsg(("\n\n!!! Problem resolving " + \
2295 "dependencies for %s\n") % \
2296 (atom,), noiselevel=-1)
2297 return 0, myfavorites
# SystemExit must propagate so the process can actually exit; any
# other exception is reported together with its module of origin.
2299 except SystemExit as e:
2300 raise # Needed else can't exit
2301 except Exception as e:
2302 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2303 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2306 # Now that the root packages have been added to the graph,
2307 # process the dependencies.
2308 if not self._create_graph():
2309 return 0, myfavorites
2313 except self._unknown_internal_error:
2314 return False, myfavorites
2316 digraph_set = frozenset(self._dynamic_config.digraph)
# If any package that made it into the graph required an autounmask
# change (keywords, mask, USE or license), resolution technically
# succeeded but the user must change configuration first, so this
# is reported as failure with _success_without_autounmask set.
2318 if digraph_set.intersection(
2319 self._dynamic_config._needed_unstable_keywords) or \
2320 digraph_set.intersection(
2321 self._dynamic_config._needed_p_mask_changes) or \
2322 digraph_set.intersection(
2323 self._dynamic_config._needed_use_config_changes) or \
2324 digraph_set.intersection(
2325 self._dynamic_config._needed_license_changes) :
2326 #We failed if the user needs to change the configuration
2327 self._dynamic_config._success_without_autounmask = True
2328 return False, myfavorites
# Rebuilds triggered while building the graph schedule a backtracking
# restart with the rebuild/reinstall lists attached.
2332 if self._rebuild.trigger_rebuilds():
2333 backtrack_infos = self._dynamic_config._backtrack_infos
2334 config = backtrack_infos.setdefault("config", {})
2335 config["rebuild_list"] = self._rebuild.rebuild_list
2336 config["reinstall_list"] = self._rebuild.reinstall_list
2337 self._dynamic_config._need_restart = True
2338 return False, myfavorites
2340 # We're true here unless we are missing binaries.
2341 return (True, myfavorites)
2343 def _set_args(self, args):
# NOTE(review): line-sampled view -- the docstring delimiters and the
# initializations of the local set_atoms/non_set_atoms dicts are not
# visible here.
2345 Create the "__non_set_args__" package set from atoms and packages given as
2346 arguments. This method can be called multiple times if necessary.
2347 The package selection cache is automatically invalidated, since
2348 arguments influence package selections.
# Reset per-root bookkeeping: the synthetic __non_set_args__ package
# set, the flattened atom collection, and the atom -> argument map.
2353 for root in self._dynamic_config.sets:
2354 depgraph_sets = self._dynamic_config.sets[root]
2355 depgraph_sets.sets.setdefault('__non_set_args__',
2356 InternalPackageSet(allow_repo=True)).clear()
2357 depgraph_sets.atoms.clear()
2358 depgraph_sets.atom_arg_map.clear()
2359 set_atoms[root] = []
2360 non_set_atoms[root] = []
2362 # We don't add set args to the digraph here since that
2363 # happens at a later stage and we don't want to make
2364 # any state changes here that aren't reversed by a
2365 # another call to this method.
# Classify each expanded argument's atoms as set-sourced or not, and
# record which argument(s) each (atom, root) pair came from.
2366 for arg in self._expand_set_args(args, add_to_digraph=False):
2367 atom_arg_map = self._dynamic_config.sets[
2368 arg.root_config.root].atom_arg_map
2369 if isinstance(arg, SetArg):
2370 atom_group = set_atoms[arg.root_config.root]
2372 atom_group = non_set_atoms[arg.root_config.root]
2374 for atom in arg.pset.getAtoms():
2375 atom_group.append(atom)
2376 atom_key = (atom, arg.root_config.root)
2377 refs = atom_arg_map.get(atom_key)
2380 atom_arg_map[atom_key] = refs
# Merge the per-root atom groups back into the depgraph set
# structures; non-set atoms also populate __non_set_args__.
2384 for root in self._dynamic_config.sets:
2385 depgraph_sets = self._dynamic_config.sets[root]
2386 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2387 non_set_atoms.get(root, [])))
2388 depgraph_sets.sets['__non_set_args__'].update(
2389 non_set_atoms.get(root, []))
2391 # Invalidate the package selection cache, since
2392 # arguments influence package selections.
2393 self._dynamic_config._highest_pkg_cache.clear()
2394 for trees in self._dynamic_config._filtered_trees.values():
2395 trees["porttree"].dbapi._clear_cache()
2397 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# NOTE(review): line-sampled view -- the docstring delimiters, the
# initializations of slots/greedy_pkgs/blockers/blocker_atoms and a
# few early-return/continue lines are not visible here.
2399 Return a list of slot atoms corresponding to installed slots that
2400 differ from the slot of the highest visible match. When
2401 blocker_lookahead is True, slot atoms that would trigger a blocker
2402 conflict are automatically discarded, potentially allowing automatic
2403 uninstallation of older slots when appropriate.
2405 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2406 if highest_pkg is None:
2408 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of installed packages matching the atom (same cp
# as the highest match only, to avoid mixing virtual styles).
2410 for cpv in vardb.match(atom):
2411 # don't mix new virtuals with old virtuals
2412 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2413 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2415 slots.add(highest_pkg.metadata["SLOT"])
2419 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep the selected package only when it is
# the same cp and strictly older than the highest visible match.
2422 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2423 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2424 if pkg is not None and \
2425 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2426 greedy_pkgs.append(pkg)
2429 if not blocker_lookahead:
2430 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: gather the blocker atoms from each candidate's
# DEPEND/PDEPEND/RDEPEND so conflicting slots can be discarded.
2433 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2434 for pkg in greedy_pkgs + [highest_pkg]:
2435 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2437 selected_atoms = self._select_atoms(
2438 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2439 parent=pkg, strict=True)
2440 except portage.exception.InvalidDependString:
2443 for atoms in selected_atoms.values():
2444 blocker_atoms.extend(x for x in atoms if x.blocker)
2445 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2447 if highest_pkg not in blockers:
2450 # filter packages with invalid deps
2451 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2453 # filter packages that conflict with highest_pkg
2454 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2455 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2456 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2461 # If two packages conflict, discard the lower version.
# Pairwise O(n^2) scan over the (version-sorted) survivors; for each
# mutually-blocking pair the lower version is discarded.
2462 discard_pkgs = set()
2463 greedy_pkgs.sort(reverse=True)
2464 for i in range(len(greedy_pkgs) - 1):
2465 pkg1 = greedy_pkgs[i]
2466 if pkg1 in discard_pkgs:
2468 for j in range(i + 1, len(greedy_pkgs)):
2469 pkg2 = greedy_pkgs[j]
2470 if pkg2 in discard_pkgs:
2472 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2473 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2475 discard_pkgs.add(pkg2)
2477 return [pkg.slot_atom for pkg in greedy_pkgs \
2478 if pkg not in discard_pkgs]
2480 def _select_atoms_from_graph(self, *pargs, **kwargs):
2482 Prefer atoms matching packages that have already been
2483 added to the graph or those that are installed and have
2484 not been scheduled for replacement.
2486 kwargs["trees"] = self._dynamic_config._graph_trees
2487 return self._select_atoms_highest_available(*pargs, **kwargs)
2489 def _select_atoms_highest_available(self, root, depstring,
2490 myuse=None, parent=None, strict=True, trees=None, priority=None):
2491 """This will raise InvalidDependString if necessary. If trees is
2492 None then self._dynamic_config._filtered_trees is used."""
# NOTE(review): line-sampled view -- the "if trees is None" guard, the
# try/finally framing around dep_check, and several loop/branch lines
# are not visible here; comments describe only visible lines.
2494 pkgsettings = self._frozen_config.pkgsettings[root]
2496 trees = self._dynamic_config._filtered_trees
2497 mytrees = trees[root]
2498 atom_graph = digraph()
2500 # Temporarily disable autounmask so that || preferences
2501 # account for masking and USE settings.
2502 _autounmask_backup = self._dynamic_config._autounmask
2503 self._dynamic_config._autounmask = False
2504 # backup state for restoration, in case of recursive
2505 # calls to this method
2506 backup_state = mytrees.copy()
2508 # clear state from previous call, in case this
2509 # call is recursive (we have a backup, that we
2510 # will use to restore it later)
2511 mytrees.pop("pkg_use_enabled", None)
2512 mytrees.pop("parent", None)
2513 mytrees.pop("atom_graph", None)
2514 mytrees.pop("priority", None)
# dep_check communicates through keys stashed in the trees mapping;
# these are installed here and removed again after the call.
2516 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2517 if parent is not None:
2518 mytrees["parent"] = parent
2519 mytrees["atom_graph"] = atom_graph
2520 if priority is not None:
2521 mytrees["priority"] = priority
2523 mycheck = portage.dep_check(depstring, None,
2524 pkgsettings, myuse=myuse,
2525 myroot=root, trees=trees)
# Restore autounmask and the pre-call mytrees state (cleanup path).
2528 self._dynamic_config._autounmask = _autounmask_backup
2529 mytrees.pop("pkg_use_enabled", None)
2530 mytrees.pop("parent", None)
2531 mytrees.pop("atom_graph", None)
2532 mytrees.pop("priority", None)
2533 mytrees.update(backup_state)
# On dep_check failure mycheck[1] holds the error message; on success
# it holds the chosen atom list.
2535 raise portage.exception.InvalidDependString(mycheck[1])
2537 selected_atoms = mycheck[1]
2538 elif parent not in atom_graph:
2539 selected_atoms = {parent : mycheck[1]}
2541 # Recursively traversed virtual dependencies, and their
2542 # direct dependencies, are considered to have the same
2543 # depth as direct dependencies.
2544 if parent.depth is None:
2547 virt_depth = parent.depth + 1
2548 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2549 selected_atoms = OrderedDict()
# Iterative DFS over the atom_graph built by dep_check: alternating
# package nodes and atom nodes, starting from the parent package.
2550 node_stack = [(parent, None, None)]
2551 traversed_nodes = set()
2553 node, node_parent, parent_atom = node_stack.pop()
2554 traversed_nodes.add(node)
# Direct children of the original parent keep the caller's priority;
# deeper (virtual-expanded) edges get a runtime-only priority.
2558 if node_parent is parent:
2559 if priority is None:
2560 node_priority = None
2562 node_priority = priority.copy()
2564 # virtuals only have runtime deps
2565 node_priority = self._priority(runtime=True)
2567 k = Dependency(atom=parent_atom,
2568 blocker=parent_atom.blocker, child=node,
2569 depth=virt_depth, parent=node_parent,
2570 priority=node_priority, root=node.root)
2573 selected_atoms[k] = child_atoms
# Only atoms that dep_check actually chose (matched by identity, not
# equality, via chosen_atom_ids) are collected for this node.
2574 for atom_node in atom_graph.child_nodes(node):
2575 child_atom = atom_node[0]
2576 if id(child_atom) not in chosen_atom_ids:
2578 child_atoms.append(child_atom)
2579 for child_node in atom_graph.child_nodes(atom_node):
2580 if child_node in traversed_nodes:
2582 if not portage.match_from_list(
2583 child_atom, [child_node]):
2584 # Typically this means that the atom
2585 # specifies USE deps that are unsatisfied
2586 # by the selected package. The caller will
2587 # record this as an unsatisfied dependency
2590 node_stack.append((child_node, node, child_atom))
2592 return selected_atoms
2594 def _expand_virt_from_graph(self, root, atom):
# Expands a virtual atom by matching it against the graph's dbapi for
# the given root and selecting the matched virtual package's RDEPEND
# atoms from the graph.
# NOTE(review): line-sampled view -- the yield/return statements and
# some guard lines are missing, so the exact production of results
# (presumably a generator yielding expanded atoms) cannot be confirmed
# from here.
2595 if not isinstance(atom, Atom):
2597 graphdb = self._dynamic_config.mydbapi[root]
2598 match = graphdb.match_pkgs(atom)
# Only packages in the virtual/ category are expanded further.
2603 if not pkg.cpv.startswith("virtual/"):
2607 rdepend = self._select_atoms_from_graph(
2608 pkg.root, pkg.metadata.get("RDEPEND", ""),
2609 myuse=self._pkg_use_enabled(pkg),
2610 parent=pkg, strict=False)
2611 except InvalidDependString as e:
2612 writemsg_level("!!! Invalid RDEPEND in " + \
2613 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2614 (pkg.root, pkg.cpv, e),
2615 noiselevel=-1, level=logging.ERROR)
2619 for atoms in rdepend.values():
2621 if hasattr(atom, "_orig_atom"):
2622 # Ignore virtual atoms since we're only
2623 # interested in expanding the real atoms.
2627 def _virt_deps_visible(self, pkg, ignore_use=False):
# NOTE(review): line-sampled view -- the docstring delimiters, the
# try: line, several return statements and parts of the atom loop
# are not visible here.
2629 Assumes pkg is a virtual package. Traverses virtual deps recursively
2630 and returns True if all deps are visible, False otherwise. This is
2631 useful for checking if it will be necessary to expand virtual slots,
2632 for cases like bug #382557.
2635 rdepend = self._select_atoms(
2636 pkg.root, pkg.metadata.get("RDEPEND", ""),
2637 myuse=self._pkg_use_enabled(pkg),
2638 parent=pkg, priority=self._priority(runtime=True))
# Invalid RDEPEND on an installed package is logged rather than
# treated as fatal; uninstalled packages take the other branch.
2639 except InvalidDependString as e:
2640 if not pkg.installed:
2642 writemsg_level("!!! Invalid RDEPEND in " + \
2643 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2644 (pkg.root, pkg.cpv, e),
2645 noiselevel=-1, level=logging.ERROR)
# For each selected RDEPEND atom, the USE deps are stripped (at least
# in the visible branch) and the selected package must pass the
# visibility check.
2648 for atoms in rdepend.values():
2651 atom = atom.without_use
2652 pkg, existing = self._select_package(
2654 if pkg is None or not self._pkg_visibility_check(pkg):
2659 def _get_dep_chain(self, start_node, target_atom=None,
2660 unsatisfied_dependency=False):
# NOTE(review): line-sampled view -- the docstring delimiters, the
# initializations of dep_chain/node/usedep/dep_strings/parent_arg and
# a number of try/else/break/continue lines are not visible here;
# comments describe only what the visible lines demonstrate.
2662 Returns a list of (atom, node_type) pairs that represent a dep chain.
2663 If target_atom is None, the first package shown is pkg's parent.
2664 If target_atom is not None the first package shown is pkg.
2665 If unsatisfied_dependency is True, the first parent is select who's
2666 dependency is not satisfied by 'pkg'. This is need for USE changes.
2667 (Does not support target_atom.)
2669 traversed_nodes = set()
2673 all_parents = self._dynamic_config._parent_atoms
# When a target_atom is given, the start node itself is rendered
# first, annotated with the USE flags that affect the target atom in
# its DEPEND/RDEPEND/PDEPEND.
2675 if target_atom is not None and isinstance(node, Package):
2676 affecting_use = set()
2677 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2679 affecting_use.update(extract_affecting_use(
2680 node.metadata[dep_str], target_atom,
2681 eapi=node.metadata["EAPI"]))
2682 except InvalidDependString:
2683 if not node.installed:
# Masked/forced flags are excluded since the user cannot change them.
2685 affecting_use.difference_update(node.use.mask, node.use.force)
2686 pkg_name = _unicode_decode("%s") % (node.cpv,)
2689 for flag in affecting_use:
2690 if flag in self._pkg_use_enabled(node):
2693 usedep.append("-"+flag)
2694 pkg_name += "[%s]" % ",".join(usedep)
2696 dep_chain.append((pkg_name, node.type_name))
2699 # To build a dep chain for the given package we take
2700 # "random" parents form the digraph, except for the
2701 # first package, because we want a parent that forced
2702 # the corresponding change (i.e '>=foo-2', instead 'foo').
2704 traversed_nodes.add(start_node)
# Choose the first parent via best_match_to_list over the recorded
# parent atoms; for unsatisfied_dependency, only atoms that do NOT
# match start_node are considered suitable.
2706 start_node_parent_atoms = {}
2707 for ppkg, patom in all_parents.get(node, []):
2708 # Get a list of suitable atoms. For use deps
2709 # (aka unsatisfied_dependency is not None) we
2710 # need that the start_node doesn't match the atom.
2711 if not unsatisfied_dependency or \
2712 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
2713 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
2715 if start_node_parent_atoms:
2716 # If there are parents in all_parents then use one of them.
2717 # If not, then this package got pulled in by an Arg and
2718 # will be correctly handled by the code that handles later
2719 # packages in the dep chain.
2720 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
2723 for ppkg in start_node_parent_atoms[best_match]:
2725 if ppkg in self._dynamic_config._initial_arg_list:
2726 # Stop if reached the top level of the dep chain.
# Main walk: climb from node to a selected parent until the top of
# the chain (an argument or a node with no untraversed parents).
2729 while node is not None:
2730 traversed_nodes.add(node)
2732 if isinstance(node, DependencyArg):
2733 if self._dynamic_config.digraph.parent_nodes(node):
2736 node_type = "argument"
2737 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
2739 elif node is not start_node:
2740 for ppkg, patom in all_parents[child]:
2742 atom = patom.unevaluated_atom
# Collect the dep strings (by edge priority kind) through which this
# node pulls in the child, to compute the affecting USE flags.
2746 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
2747 if priority.buildtime:
2748 dep_strings.add(node.metadata["DEPEND"])
2749 if priority.runtime:
2750 dep_strings.add(node.metadata["RDEPEND"])
2751 if priority.runtime_post:
2752 dep_strings.add(node.metadata["PDEPEND"])
2754 affecting_use = set()
2755 for dep_str in dep_strings:
2757 affecting_use.update(extract_affecting_use(
2758 dep_str, atom, eapi=node.metadata["EAPI"]))
2759 except InvalidDependString:
2760 if not node.installed:
2763 #Don't show flags as 'affecting' if the user can't change them,
2764 affecting_use.difference_update(node.use.mask, \
2767 pkg_name = _unicode_decode("%s") % (node.cpv,)
2770 for flag in affecting_use:
2771 if flag in self._pkg_use_enabled(node):
2774 usedep.append("-"+flag)
2775 pkg_name += "[%s]" % ",".join(usedep)
2777 dep_chain.append((pkg_name, node.type_name))
2779 if node not in self._dynamic_config.digraph:
2780 # The parent is not in the graph due to backtracking.
2783 # When traversing to parents, prefer arguments over packages
2784 # since arguments are root nodes. Never traverse the same
2785 # package twice, in order to prevent an infinite loop.
2787 selected_parent = None
2790 parent_unsatisfied = None
2792 for parent in self._dynamic_config.digraph.parent_nodes(node):
2793 if parent in traversed_nodes:
2795 if isinstance(parent, DependencyArg):
2798 if isinstance(parent, Package) and \
2799 parent.operation == "merge":
2800 parent_merge = parent
2801 if unsatisfied_dependency and node is start_node:
2802 # Make sure that pkg doesn't satisfy parent's dependency.
2803 # This ensures that we select the correct parent for use
2805 for ppkg, atom in all_parents[start_node]:
2807 atom_set = InternalPackageSet(initial_atoms=(atom,))
2808 if not atom_set.findAtomForPackage(start_node):
2809 parent_unsatisfied = parent
2812 selected_parent = parent
# Parent preference order: unsatisfied parent (USE-change case), then
# a parent scheduled for merge (bug #354747), then an argument parent.
2814 if parent_unsatisfied is not None:
2815 selected_parent = parent_unsatisfied
2816 elif parent_merge is not None:
2817 # Prefer parent in the merge list (bug #354747).
2818 selected_parent = parent_merge
2819 elif parent_arg is not None:
2820 if self._dynamic_config.digraph.parent_nodes(parent_arg):
2821 selected_parent = parent_arg
2824 (_unicode_decode("%s") % (parent_arg,), "argument"))
2825 selected_parent = None
2827 node = selected_parent
2830 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
2831 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2833 for node, node_type in dep_chain:
2834 if node_type == "argument":
2835 display_list.append("required by %s (argument)" % node)
2837 display_list.append("required by %s" % node)
2839 msg = "#" + ", ".join(display_list) + "\n"
2843 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2844 check_backtrack=False, check_autounmask_breakage=False):
2846 When check_backtrack=True, no output is produced and
2847 the method either returns or raises _backtrack_mask if
2848 a matching package has been masked by backtracking.
2850 backtrack_mask = False
2851 autounmask_broke_use_dep = False
2852 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
2854 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
2856 xinfo = '"%s"' % atom.unevaluated_atom
2859 if isinstance(myparent, AtomArg):
2860 xinfo = _unicode_decode('"%s"') % (myparent,)
2861 # Discard null/ from failed cpv_expand category expansion.
2862 xinfo = xinfo.replace("null/", "")
2864 xinfo = "%s for %s" % (xinfo, root)
2865 masked_packages = []
2867 missing_use_adjustable = set()
2868 required_use_unsatisfied = []
2869 masked_pkg_instances = set()
2870 missing_licenses = []
2871 have_eapi_mask = False
2872 pkgsettings = self._frozen_config.pkgsettings[root]
2873 root_config = self._frozen_config.roots[root]
2874 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2875 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2876 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2877 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2878 for db, pkg_type, built, installed, db_keys in dbs:
2882 if hasattr(db, "xmatch"):
2883 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2885 cpv_list = db.match(atom.without_use)
2887 if atom.repo is None and hasattr(db, "getRepositories"):
2888 repo_list = db.getRepositories()
2890 repo_list = [atom.repo]
2894 for cpv in cpv_list:
2895 for repo in repo_list:
2896 if not db.cpv_exists(cpv, myrepo=repo):
2899 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2900 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2901 if metadata is not None and \
2902 portage.eapi_is_supported(metadata["EAPI"]):
2904 repo = metadata.get('repository')
2905 pkg = self._pkg(cpv, pkg_type, root_config,
2906 installed=installed, myrepo=repo)
2907 if not atom_set.findAtomForPackage(pkg,
2908 modified_use=self._pkg_use_enabled(pkg)):
2910 # pkg.metadata contains calculated USE for ebuilds,
2911 # required later for getMissingLicenses.
2912 metadata = pkg.metadata
2913 if pkg in self._dynamic_config._runtime_pkg_mask:
2914 backtrack_reasons = \
2915 self._dynamic_config._runtime_pkg_mask[pkg]
2916 mreasons.append('backtracking: %s' % \
2917 ', '.join(sorted(backtrack_reasons)))
2918 backtrack_mask = True
2919 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2920 modified_use=self._pkg_use_enabled(pkg)):
2921 mreasons = ["exclude option"]
2923 masked_pkg_instances.add(pkg)
2924 if atom.unevaluated_atom.use:
2926 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2927 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2928 missing_use.append(pkg)
2929 if atom_set_with_use.findAtomForPackage(pkg):
2930 autounmask_broke_use_dep = True
2934 writemsg("violated_conditionals raised " + \
2935 "InvalidAtom: '%s' parent: %s" % \
2936 (atom, myparent), noiselevel=-1)
2938 if not mreasons and \
2940 pkg.metadata["REQUIRED_USE"] and \
2941 eapi_has_required_use(pkg.metadata["EAPI"]):
2942 if not check_required_use(
2943 pkg.metadata["REQUIRED_USE"],
2944 self._pkg_use_enabled(pkg),
2945 pkg.iuse.is_valid_flag):
2946 required_use_unsatisfied.append(pkg)
2948 root_slot = (pkg.root, pkg.slot_atom)
2949 if pkg.built and root_slot in self._rebuild.rebuild_list:
2950 mreasons = ["need to rebuild from source"]
2951 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
2952 mreasons = ["need to rebuild from source"]
2953 elif pkg.built and not mreasons:
2954 mreasons = ["use flag configuration mismatch"]
2955 masked_packages.append(
2956 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
2960 raise self._backtrack_mask()
2964 if check_autounmask_breakage:
2965 if autounmask_broke_use_dep:
2966 raise self._autounmask_breakage()
2970 missing_use_reasons = []
2971 missing_iuse_reasons = []
2972 for pkg in missing_use:
2973 use = self._pkg_use_enabled(pkg)
2975 #Use the unevaluated atom here, because some flags might have gone
2976 #lost during evaluation.
2977 required_flags = atom.unevaluated_atom.use.required
2978 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2982 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2983 missing_iuse_reasons.append((pkg, mreasons))
2985 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2986 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2988 untouchable_flags = \
2989 frozenset(chain(pkg.use.mask, pkg.use.force))
2990 if untouchable_flags.intersection(
2991 chain(need_enable, need_disable)):
2994 missing_use_adjustable.add(pkg)
2995 required_use = pkg.metadata["REQUIRED_USE"]
2996 required_use_warning = ""
2998 old_use = self._pkg_use_enabled(pkg)
2999 new_use = set(self._pkg_use_enabled(pkg))
3000 for flag in need_enable:
3002 for flag in need_disable:
3003 new_use.discard(flag)
3004 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3005 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
3006 required_use_warning = ", this change violates use flag constraints " + \
3007 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3009 if need_enable or need_disable:
3011 changes.extend(colorize("red", "+" + x) \
3012 for x in need_enable)
3013 changes.extend(colorize("blue", "-" + x) \
3014 for x in need_disable)
3015 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3016 missing_use_reasons.append((pkg, mreasons))
3018 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3019 # Lets see if the violated use deps are conditional.
3020 # If so, suggest to change them on the parent.
3022 # If the child package is masked then a change to
3023 # parent USE is not a valid solution (a normal mask
3024 # message should be displayed instead).
3025 if pkg in masked_pkg_instances:
3029 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3030 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3031 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3032 #all violated use deps are conditional
3034 conditional = violated_atom.use.conditional
3035 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3036 conditional.enabled, conditional.disabled))
3038 untouchable_flags = \
3039 frozenset(chain(myparent.use.mask, myparent.use.force))
3040 if untouchable_flags.intersection(involved_flags):
3043 required_use = myparent.metadata["REQUIRED_USE"]
3044 required_use_warning = ""
3046 old_use = self._pkg_use_enabled(myparent)
3047 new_use = set(self._pkg_use_enabled(myparent))
3048 for flag in involved_flags:
3050 new_use.discard(flag)
3053 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
3054 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
3055 required_use_warning = ", this change violates use flag constraints " + \
3056 "defined by %s: '%s'" % (myparent.cpv, \
3057 human_readable_required_use(required_use))
3059 for flag in involved_flags:
3060 if flag in self._pkg_use_enabled(myparent):
3061 changes.append(colorize("blue", "-" + flag))
3063 changes.append(colorize("red", "+" + flag))
3064 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3065 if (myparent, mreasons) not in missing_use_reasons:
3066 missing_use_reasons.append((myparent, mreasons))
3068 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3069 in missing_use_reasons if pkg not in masked_pkg_instances]
3071 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3072 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3074 show_missing_use = False
3075 if unmasked_use_reasons:
3076 # Only show the latest version.
3077 show_missing_use = []
3079 parent_reason = None
3080 for pkg, mreasons in unmasked_use_reasons:
3082 if parent_reason is None:
3083 #This happens if a use change on the parent
3084 #leads to a satisfied conditional use dep.
3085 parent_reason = (pkg, mreasons)
3086 elif pkg_reason is None:
3087 #Don't rely on the first pkg in unmasked_use_reasons,
3088 #being the highest version of the dependency.
3089 pkg_reason = (pkg, mreasons)
3091 show_missing_use.append(pkg_reason)
3093 show_missing_use.append(parent_reason)
3095 elif unmasked_iuse_reasons:
3096 masked_with_iuse = False
3097 for pkg in masked_pkg_instances:
3098 #Use atom.unevaluated here, because some flags might have gone
3099 #lost during evaluation.
3100 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3101 # Package(s) with required IUSE are masked,
3102 # so display a normal masking message.
3103 masked_with_iuse = True
3105 if not masked_with_iuse:
3106 show_missing_use = unmasked_iuse_reasons
3108 if required_use_unsatisfied:
3109 # If there's a higher unmasked version in missing_use_adjustable
3110 # then we want to show that instead.
3111 for pkg in missing_use_adjustable:
3112 if pkg not in masked_pkg_instances and \
3113 pkg > required_use_unsatisfied[0]:
3114 required_use_unsatisfied = False
3119 if required_use_unsatisfied:
3120 # We have an unmasked package that only requires USE adjustment
3121 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3122 # that the user wants the latest version, so only the first
3123 # instance is displayed.
3124 pkg = required_use_unsatisfied[0]
3125 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3126 writemsg_stdout("\n!!! " + \
3127 colorize("BAD", "The ebuild selected to satisfy ") + \
3128 colorize("INFORM", xinfo) + \
3129 colorize("BAD", " has unmet requirements.") + "\n",
3131 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3132 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
3134 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
3135 "are unsatisfied:\n", noiselevel=-1)
3136 reduced_noise = check_required_use(
3137 pkg.metadata["REQUIRED_USE"],
3138 self._pkg_use_enabled(pkg),
3139 pkg.iuse.is_valid_flag).tounicode()
3140 writemsg_stdout(" %s\n" % \
3141 human_readable_required_use(reduced_noise),
3143 normalized_required_use = \
3144 " ".join(pkg.metadata["REQUIRED_USE"].split())
3145 if reduced_noise != normalized_required_use:
3146 writemsg_stdout("\n The above constraints " + \
3147 "are a subset of the following complete expression:\n",
3149 writemsg_stdout(" %s\n" % \
3150 human_readable_required_use(normalized_required_use),
3152 writemsg_stdout("\n", noiselevel=-1)
3154 elif show_missing_use:
3155 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3156 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3157 for pkg, mreasons in show_missing_use:
3158 writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3160 elif masked_packages:
3161 writemsg_stdout("\n!!! " + \
3162 colorize("BAD", "All ebuilds that could satisfy ") + \
3163 colorize("INFORM", xinfo) + \
3164 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3165 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3166 have_eapi_mask = show_masked_packages(masked_packages)
3168 writemsg_stdout("\n", noiselevel=-1)
3169 msg = ("The current version of portage supports " + \
3170 "EAPI '%s'. You must upgrade to a newer version" + \
3171 " of portage before EAPI masked packages can" + \
3172 " be installed.") % portage.const.EAPI
3173 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3174 writemsg_stdout("\n", noiselevel=-1)
3178 if not atom.cp.startswith("null/"):
3179 for pkg in self._iter_match_pkgs_any(
3180 root_config, Atom(atom.cp)):
3184 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3185 if isinstance(myparent, AtomArg) and \
3187 self._frozen_config.myopts.get(
3188 "--misspell-suggestions", "y") != "n":
3189 cp = myparent.atom.cp.lower()
3190 cat, pkg = portage.catsplit(cp)
3194 writemsg_stdout("\nemerge: searching for similar names..."
3198 all_cp.update(vardb.cp_all())
3199 all_cp.update(portdb.cp_all())
3200 if "--usepkg" in self._frozen_config.myopts:
3201 all_cp.update(bindb.cp_all())
3202 # discard dir containing no ebuilds
3206 for cp_orig in all_cp:
3207 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3208 all_cp = set(orig_cp_map)
3211 matches = difflib.get_close_matches(cp, all_cp)
3214 for other_cp in list(all_cp):
3215 other_pkg = portage.catsplit(other_cp)[1]
3216 if other_pkg == pkg:
3217 # discard dir containing no ebuilds
3218 all_cp.discard(other_cp)
3220 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3221 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3223 for pkg_match in pkg_matches:
3224 matches.extend(pkg_to_cp[pkg_match])
3226 matches_orig_case = []
3228 matches_orig_case.extend(orig_cp_map[cp])
3229 matches = matches_orig_case
3231 if len(matches) == 1:
3232 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
3234 elif len(matches) > 1:
3236 "\nemerge: Maybe you meant any of these: %s?\n" % \
3237 (", ".join(matches),), noiselevel=-1)
3239 # Generally, this would only happen if
3240 # all dbapis are empty.
3241 writemsg_stdout(" nothing similar found.\n"
3244 if not isinstance(myparent, AtomArg):
3245 # It's redundant to show parent for AtomArg since
3246 # it's the same as 'xinfo' displayed above.
3247 dep_chain = self._get_dep_chain(myparent, atom)
3248 for node, node_type in dep_chain:
3249 msg.append('(dependency required by "%s" [%s])' % \
3250 (colorize('INFORM', _unicode_decode("%s") % \
3251 (node)), node_type))
3254 writemsg_stdout("\n".join(msg), noiselevel=-1)
3255 writemsg_stdout("\n", noiselevel=-1)
3259 writemsg_stdout("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
    """
    Match the given atom against every package database configured for
    root_config's root (the filtered "dbs" list), delegating per-type
    matching to _iter_match_pkgs().
    """
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
        for pkg in self._iter_match_pkgs(root_config,
            pkg_type, atom, onlydeps=onlydeps):
            # NOTE(review): the inner loop body is elided from this
            # excerpt (presumably it yields each matched pkg --
            # confirm against the full source).
def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
    """
    Iterate over Package instances of pkg_type matching the given atom.
    This does not check visibility and it also does not match USE for
    unbuilt ebuilds since USE are lazily calculated after visibility
    checks (to avoid the expense when possible).
    """
    # NOTE(review): several original lines are elided from this excerpt;
    # elision points that affect control flow are marked below.
    db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi

    if hasattr(db, "xmatch"):
        # For portdbapi we match only against the cpv, in order
        # to bypass unnecessary cache access for things like IUSE
        # and SLOT. Later, we cache the metadata in a Package
        # instance, and use that for further matching. This
        # optimization is especially relevant since
        # pordbapi.aux_get() does not cache calls that have
        # myrepo or mytree arguments.
        cpv_list = db.xmatch("match-all-cpv-only", atom)
    # NOTE(review): an "else:" line is elided here; the assignment
    # below is the non-xmatch fallback path.
        cpv_list = db.match(atom)

    # USE=multislot can make an installed package appear as if
    # it doesn't satisfy a slot dependency. Rebuilding the ebuild
    # won't do any good as long as USE=multislot is enabled since
    # the newly built package still won't have the expected slot.
    # Therefore, assume that such SLOT dependencies are already
    # satisfied rather than forcing a rebuild.
    installed = pkg_type == 'installed'
    if installed and not cpv_list and atom.slot:

        if "remove" in self._dynamic_config.myparams:
            # We need to search the portdbapi, which is not in our
            # normal dbs list, in order to find the real SLOT.
            portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
            db_keys = list(portdb._aux_cache_keys)
            dbs = [(portdb, "ebuild", False, False, db_keys)]
        # NOTE(review): "else:" elided; the line below is the normal
        # (non-remove) source of databases to scan.
            dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]

        for cpv in db.match(atom.cp):
            slot_available = False
            for other_db, other_type, other_built, \
                other_installed, other_keys in dbs:
                # NOTE(review): the enclosing try/if lines (comparing
                # atom.slot to this db's SLOT value) are elided; only
                # the continuation line survives below.
                    other_db.aux_get(cpv, ["SLOT"])[0]:
                    slot_available = True
            if not slot_available:
                inst_pkg = self._pkg(cpv, "installed",
                    root_config, installed=installed, myrepo = atom.repo)
                # Remove the slot from the atom and verify that
                # the package matches the resulting atom.
                if portage.match_from_list(
                    atom.without_slot, [inst_pkg]):
                    # NOTE(review): branch body elided (presumably
                    # yields inst_pkg -- confirm against full source).

    atom_set = InternalPackageSet(initial_atoms=(atom,),
    # NOTE(review): the continuation of this call (its remaining
    # keyword arguments and closing paren) is elided from this excerpt.
    if atom.repo is None and hasattr(db, "getRepositories"):
        repo_list = db.getRepositories()
    # NOTE(review): "else:" elided; the line below is the fallback for
    # an atom that names a specific repository.
        repo_list = [atom.repo]

    for cpv in cpv_list:
        for repo in repo_list:
            # NOTE(review): an enclosing "try:" is elided; the
            # PackageNotFound raised by self._pkg() is handled below.
            pkg = self._pkg(cpv, pkg_type, root_config,
                installed=installed, onlydeps=onlydeps, myrepo=repo)
            except portage.exception.PackageNotFound:
                # NOTE(review): handler body elided.
                # A cpv can be returned from dbapi.match() as an
                # old-style virtual match even in cases when the
                # package does not actually PROVIDE the virtual.
                # Filter out any such false matches here.

                # Make sure that cpv from the current repo satisfies the atom.
                # This might not be the case if there are several repos with
                # the same cpv, but different metadata keys, like SLOT.
                # Also, for portdbapi, parts of the match that require
                # metadata access are deferred until we have cached the
                # metadata in a Package instance.
                if not atom_set.findAtomForPackage(pkg,
                    modified_use=self._pkg_use_enabled(pkg)):
                    # NOTE(review): branch body and the final yield are
                    # elided from this excerpt.
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
    """
    Memoizing wrapper around _select_pkg_highest_available_imp():
    results are cached per (root, atom, onlydeps) key in
    _dynamic_config._highest_pkg_cache.
    """
    cache_key = (root, atom, onlydeps)
    ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
    # NOTE(review): the cache-hit unpacking lines (presumably
    # "if ret is not None: pkg, existing = ret") are elided from
    # this excerpt.
    if pkg and not existing:
        existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
        if existing and existing == pkg:
            # Update the cache to reflect that the
            # package has been added to the graph.
            self._dynamic_config._highest_pkg_cache[cache_key] = ret
    # NOTE(review): the cache-hit return is elided; the lines below
    # are the cache-miss path -- compute, then populate the cache.
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
    self._dynamic_config._highest_pkg_cache[cache_key] = ret
3382 settings = pkg.root_config.settings
3383 if self._pkg_visibility_check(pkg) and \
3384 not (pkg.installed and pkg.masks):
3385 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
def _want_installed_pkg(self, pkg):
    """
    Given an installed package returned from select_pkg, return
    True if the user has not explicitly requested for this package
    to be replaced (typically via an atom on the command line).
    """
    if "selective" not in self._dynamic_config.myparams and \
        pkg.root == self._frozen_config.target_root:
        if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
            # NOTE(review): branch body elided (an --exclude'd package
            # is presumably kept -- confirm against full source).
    # NOTE(review): an enclosing "try:" is elided; the scan below
    # checks whether any argument atom matches this package.
        next(self._iter_atoms_for_pkg(pkg))
    except StopIteration:
        # NOTE(review): handler body elided (no argument atom matched).
    except portage.exception.InvalidDependString:
        # NOTE(review): handler body elided; the final return of this
        # method is also elided from this excerpt.
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
    """
    Select a package for (root, atom). When autounmask is enabled and
    the plain selection is rejected, retry with progressively more
    permissive settings: USE changes only first, then also unstable
    keywords / license changes, then unmasks.
    """
    pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)

    # Remember the unmodified result so we can fall back to it.
    default_selection = (pkg, existing)

    if self._dynamic_config._autounmask is True:
        if pkg is not None and \
            not self._want_installed_pkg(pkg):
            # NOTE(review): additional conditions on this "if" and its
            # body are elided from this excerpt.
        for only_use_changes in True, False:
            # NOTE(review): loop-entry guard lines are elided here.
            for allow_unmasks in (False, True):
                if only_use_changes and allow_unmasks:
                    # NOTE(review): body elided (presumably "continue";
                    # pure-USE passes never unmask).

                    # NOTE(review): the assignment target of this call
                    # (presumably "pkg, existing = \") is elided.
                    self._wrapped_select_pkg_highest_available_imp(
                        root, atom, onlydeps=onlydeps,
                        allow_use_changes=True,
                        allow_unstable_keywords=(not only_use_changes),
                        allow_license_changes=(not only_use_changes),
                        allow_unmasks=allow_unmasks)

                if pkg is not None and \
                    not self._want_installed_pkg(pkg):
                    # NOTE(review): conditions and body elided.

        if self._dynamic_config._need_restart:
            # NOTE(review): body elided.

        # NOTE(review): a guard line is elided before the fallback
        # return below.
        # This ensures that we can fall back to an installed package
        # that may have been rejected in the autounmask path above.
        return default_selection

    return pkg, existing
def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
    """
    Check whether pkg may be used, taking autounmask into account.

    Masks that can be lifted by keyword, package.mask, or license
    changes are recorded in _dynamic_config (and mirrored into the
    backtracking "config" infos) when the corresponding allow_*
    parameter permits it.
    """
    # NOTE(review): initial fast-path lines (for an already-visible
    # package) are elided from this excerpt.
    if pkg in self._dynamic_config.digraph:
        # Sometimes we need to temporarily disable
        # dynamic_config._autounmask, but for overall
        # consistency in dependency resolution, in any
        # case we want to respect autounmask visibity
        # for packages that have already been added to
        # the dependency graph.
        # NOTE(review): branch body elided.

    if not self._dynamic_config._autounmask:
        # NOTE(review): body elided (autounmask disabled -- presumably
        # the check fails here).

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]
    root_config = self._frozen_config.roots[pkg.root]
    mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))

    # Classify every masking reason by its unmask hint.
    masked_by_unstable_keywords = False
    masked_by_missing_keywords = False
    missing_licenses = None
    masked_by_something_else = False
    masked_by_p_mask = False

    for reason in mreasons:
        hint = reason.unmask_hint
        # NOTE(review): the condition guarding the next line (a reason
        # with no usable unmask hint) is elided.
        masked_by_something_else = True
        elif hint.key == "unstable keyword":
            masked_by_unstable_keywords = True
            if hint.value == "**":
                # A "**" hint value is treated as a missing keyword.
                masked_by_missing_keywords = True
        elif hint.key == "p_mask":
            masked_by_p_mask = True
        elif hint.key == "license":
            missing_licenses = hint.value
        # NOTE(review): "else:" elided before the fallback below
        # (any unrecognized hint key).
            masked_by_something_else = True

    if masked_by_something_else:
        # NOTE(review): body elided (such masks cannot be lifted by
        # autounmask).

    if pkg in self._dynamic_config._needed_unstable_keywords:
        #If the package is already keyworded, remove the mask.
        masked_by_unstable_keywords = False
        masked_by_missing_keywords = False

    if pkg in self._dynamic_config._needed_p_mask_changes:
        #If the package is already keyworded, remove the mask.
        masked_by_p_mask = False

    if missing_licenses:
        #If the needed licenses are already unmasked, remove the mask.
        missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))

    if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
        #Package has already been unmasked.
        # NOTE(review): body elided (presumably the check succeeds).

    #We treat missing keywords in the same way as masks.
    if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
        (masked_by_missing_keywords and not allow_unmasks) or \
        (masked_by_p_mask and not allow_unmasks) or \
        (missing_licenses and not allow_license_changes):
        #We are not allowed to do the needed changes.
        # NOTE(review): body elided (the check fails here).

    # Record each permitted change both in the live config and in the
    # backtracking infos so a restart can replay it.
    if masked_by_unstable_keywords:
        self._dynamic_config._needed_unstable_keywords.add(pkg)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
        backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)

    if masked_by_p_mask:
        self._dynamic_config._needed_p_mask_changes.add(pkg)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
        backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)

    if missing_licenses:
        self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_license_changes", set())
        backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
        # NOTE(review): the method's final return is elided from this
        # excerpt.
def _pkg_use_enabled(self, pkg, target_use=None):
    """
    If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
    If target_use is given, the need changes are computed to make the package useable.
    Example: target_use = { "foo": True, "bar": False }
    The flags target_use must be in the pkg's IUSE.
    """
    # NOTE(review): a guard line is elided before this early return
    # (presumably "if pkg.built:" -- a built package's USE is fixed).
        return pkg.use.enabled
    needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)

    if target_use is None:
        if needed_use_config_change is None:
            return pkg.use.enabled
        # NOTE(review): "else:" elided before the pending-change
        # return below.
            return needed_use_config_change[0]

    if needed_use_config_change is not None:
        old_use = needed_use_config_change[0]
        # NOTE(review): a "new_use" initialization line is elided here.
        old_changes = needed_use_config_change[1]
        new_changes = old_changes.copy()
    # NOTE(review): "else:" elided; the line below initializes the
    # no-previous-change case (its companion init lines are also
    # elided).
        old_use = pkg.use.enabled

    for flag, state in target_use.items():
        # NOTE(review): the "if state:" branch structure is elided;
        # the lines below handle enabling, then disabling, a flag.
        if flag not in old_use:
            if new_changes.get(flag) == False:
                # NOTE(review): body elided (conflicting prior change).
            new_changes[flag] = True
        # NOTE(review): disable-case guard lines elided.
            if new_changes.get(flag) == True:
                # NOTE(review): body elided (conflicting prior change).
            new_changes[flag] = False
    # Flags not mentioned in target_use keep their current state.
    new_use.update(old_use.difference(target_use))

    def want_restart_for_use_change(pkg, new_use):
        # Restart is only relevant for packages already in the graph.
        if pkg not in self._dynamic_config.digraph.nodes:
            # NOTE(review): body elided.

        # If the new USE settings change any dependency string, the
        # previously computed deps are stale.
        for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
            dep = pkg.metadata[key]
            old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
            new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))

            if old_val != new_val:
                # NOTE(review): body elided.

        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if not parent_atoms:
            # NOTE(review): body elided.

        new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
        for ppkg, atom in parent_atoms:
            if not atom.use or \
                not atom.use.required.intersection(changes):
                # NOTE(review): loop body and the function's returns
                # are elided from this excerpt.

    if new_changes != old_changes:
        #Don't do the change if it violates REQUIRED_USE.
        required_use = pkg.metadata["REQUIRED_USE"]
        if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
            not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
            # NOTE(review): body elided (presumably returns old_use).

        # Masked/forced flags cannot be changed in the user's config.
        if pkg.use.mask.intersection(new_changes) or \
            pkg.use.force.intersection(new_changes):
            # NOTE(review): body elided.

        # Record the change and mirror it into the backtracking infos
        # so a restart can replay it.
        self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_use_config_changes", [])
        backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
        if want_restart_for_use_change(pkg, new_use):
            self._dynamic_config._need_restart = True
            # NOTE(review): the method's final return is elided from
            # this excerpt.
3636 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
3637 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
3638 root_config = self._frozen_config.roots[root]
3639 pkgsettings = self._frozen_config.pkgsettings[root]
3640 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3641 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3642 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3643 # List of acceptable packages, ordered by type preference.
3644 matched_packages = []
3645 matched_pkgs_ignore_use = []
3646 highest_version = None
3647 if not isinstance(atom, portage.dep.Atom):
3648 atom = portage.dep.Atom(atom)
3650 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
3651 existing_node = None
3653 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3654 usepkg = "--usepkg" in self._frozen_config.myopts
3655 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3656 empty = "empty" in self._dynamic_config.myparams
3657 selective = "selective" in self._dynamic_config.myparams
3659 avoid_update = "--update" not in self._frozen_config.myopts
3660 dont_miss_updates = "--update" in self._frozen_config.myopts
3661 use_ebuild_visibility = self._frozen_config.myopts.get(
3662 '--use-ebuild-visibility', 'n') != 'n'
3663 reinstall_atoms = self._frozen_config.reinstall_atoms
3664 usepkg_exclude = self._frozen_config.usepkg_exclude
3665 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
3667 # Behavior of the "selective" parameter depends on
3668 # whether or not a package matches an argument atom.
3669 # If an installed package provides an old-style
3670 # virtual that is no longer provided by an available
3671 # package, the installed package may match an argument
3672 # atom even though none of the available packages do.
3673 # Therefore, "selective" logic does not consider
3674 # whether or not an installed package matches an
3675 # argument atom. It only considers whether or not
3676 # available packages match argument atoms, which is
3677 # represented by the found_available_arg flag.
3678 found_available_arg = False
3679 packages_with_invalid_use_config = []
3680 for find_existing_node in True, False:
3683 for db, pkg_type, built, installed, db_keys in dbs:
3686 if installed and not find_existing_node:
3687 want_reinstall = reinstall or empty or \
3688 (found_available_arg and not selective)
3689 if want_reinstall and matched_packages:
3692 # Ignore USE deps for the initial match since we want to
3693 # ensure that updates aren't missed solely due to the user's
3694 # USE configuration.
3695 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3697 if pkg in self._dynamic_config._runtime_pkg_mask:
3698 # The package has been masked by the backtracking logic
3700 root_slot = (pkg.root, pkg.slot_atom)
3701 if pkg.built and root_slot in self._rebuild.rebuild_list:
3703 if (pkg.installed and
3704 root_slot in self._rebuild.reinstall_list):
3707 if not pkg.installed and \
3708 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3709 modified_use=self._pkg_use_enabled(pkg)):
3712 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
3713 modified_use=self._pkg_use_enabled(pkg)):
3716 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
3717 modified_use=self._pkg_use_enabled(pkg))
3719 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
3720 (not pkg.installed or dont_miss_updates):
3721 # Check if a higher version was rejected due to user
3722 # USE configuration. The packages_with_invalid_use_config
3723 # list only contains unbuilt ebuilds since USE can't
3724 # be changed for built packages.
3725 higher_version_rejected = False
3726 repo_priority = pkg.repo_priority
3727 for rejected in packages_with_invalid_use_config:
3728 if rejected.cp != pkg.cp:
3731 higher_version_rejected = True
3733 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
3734 # If version is identical then compare
3735 # repo priority (see bug #350254).
3736 rej_repo_priority = rejected.repo_priority
3737 if rej_repo_priority is not None and \
3738 (repo_priority is None or
3739 rej_repo_priority > repo_priority):
3740 higher_version_rejected = True
3742 if higher_version_rejected:
3746 reinstall_for_flags = None
3748 if not pkg.installed or \
3749 (matched_packages and not avoid_update):
3750 # Only enforce visibility on installed packages
3751 # if there is at least one other visible package
3752 # available. By filtering installed masked packages
3753 # here, packages that have been masked since they
3754 # were installed can be automatically downgraded
3755 # to an unmasked version. NOTE: This code needs to
3756 # be consistent with masking behavior inside
3757 # _dep_check_composite_db, in order to prevent
3758 # incorrect choices in || deps like bug #351828.
3760 if not self._pkg_visibility_check(pkg, \
3761 allow_unstable_keywords=allow_unstable_keywords,
3762 allow_license_changes=allow_license_changes,
3763 allow_unmasks=allow_unmasks):
3766 # Enable upgrade or downgrade to a version
3767 # with visible KEYWORDS when the installed
3768 # version is masked by KEYWORDS, but never
3769 # reinstall the same exact version only due
3770 # to a KEYWORDS mask. See bug #252167.
3772 if pkg.type_name != "ebuild" and matched_packages:
3773 # Don't re-install a binary package that is
3774 # identical to the currently installed package
3775 # (see bug #354441).
3776 identical_binary = False
3777 if usepkg and pkg.installed:
3778 for selected_pkg in matched_packages:
3779 if selected_pkg.type_name == "binary" and \
3780 selected_pkg.cpv == pkg.cpv and \
3781 selected_pkg.metadata.get('BUILD_TIME') == \
3782 pkg.metadata.get('BUILD_TIME'):
3783 identical_binary = True
3786 if not identical_binary:
3787 # If the ebuild no longer exists or it's
3788 # keywords have been dropped, reject built
3789 # instances (installed or binary).
3790 # If --usepkgonly is enabled, assume that
3791 # the ebuild status should be ignored.
3792 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
3793 if pkg.installed and pkg.masks:
3798 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
3799 except portage.exception.PackageNotFound:
3800 pkg_eb_visible = False
3801 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3802 "ebuild", Atom("=%s" % (pkg.cpv,))):
3803 if self._pkg_visibility_check(pkg_eb, \
3804 allow_unstable_keywords=allow_unstable_keywords,
3805 allow_license_changes=allow_license_changes,
3806 allow_unmasks=allow_unmasks):
3807 pkg_eb_visible = True
3809 if not pkg_eb_visible:
3812 if not self._pkg_visibility_check(pkg_eb, \
3813 allow_unstable_keywords=allow_unstable_keywords,
3814 allow_license_changes=allow_license_changes,
3815 allow_unmasks=allow_unmasks):
3818 # Calculation of USE for unbuilt ebuilds is relatively
3819 # expensive, so it is only performed lazily, after the
3820 # above visibility checks are complete.
3823 if root == self._frozen_config.target_root:
3825 myarg = next(self._iter_atoms_for_pkg(pkg))
3826 except StopIteration:
3828 except portage.exception.InvalidDependString:
3830 # masked by corruption
3832 if not installed and myarg:
3833 found_available_arg = True
3835 if atom.unevaluated_atom.use:
3836 #Make sure we don't miss a 'missing IUSE'.
3837 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3838 # Don't add this to packages_with_invalid_use_config
3839 # since IUSE cannot be adjusted by the user.
3844 matched_pkgs_ignore_use.append(pkg)
3845 if allow_use_changes and not pkg.built:
3847 for flag in atom.use.enabled:
3848 target_use[flag] = True
3849 for flag in atom.use.disabled:
3850 target_use[flag] = False
3851 use = self._pkg_use_enabled(pkg, target_use)
3853 use = self._pkg_use_enabled(pkg)
3856 can_adjust_use = not pkg.built
3857 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3858 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3860 if atom.use.enabled:
3861 if atom.use.enabled.intersection(missing_disabled):
3863 can_adjust_use = False
3864 need_enabled = atom.use.enabled.difference(use)
3866 need_enabled = need_enabled.difference(missing_enabled)
3870 if pkg.use.mask.intersection(need_enabled):
3871 can_adjust_use = False
3873 if atom.use.disabled:
3874 if atom.use.disabled.intersection(missing_enabled):
3876 can_adjust_use = False
3877 need_disabled = atom.use.disabled.intersection(use)
3879 need_disabled = need_disabled.difference(missing_disabled)
3883 if pkg.use.force.difference(
3884 pkg.use.mask).intersection(need_disabled):
3885 can_adjust_use = False
3889 # Above we must ensure that this package has
3890 # absolutely no use.force, use.mask, or IUSE
3891 # issues that the user typically can't make
3892 # adjustments to solve (see bug #345979).
3893 # FIXME: Conditional USE deps complicate
3894 # issues. This code currently excludes cases
3895 # in which the user can adjust the parent
3896 # package's USE in order to satisfy the dep.
3897 packages_with_invalid_use_config.append(pkg)
3900 if pkg.cp == atom_cp:
3901 if highest_version is None:
3902 highest_version = pkg
3903 elif pkg > highest_version:
3904 highest_version = pkg
3905 # At this point, we've found the highest visible
3906 # match from the current repo. Any lower versions
3907 # from this repo are ignored, so this so the loop
3908 # will always end with a break statement below
3910 if find_existing_node:
3911 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3914 # Use PackageSet.findAtomForPackage()
3915 # for PROVIDE support.
3916 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3917 if highest_version and \
3918 e_pkg.cp == atom_cp and \
3919 e_pkg < highest_version and \
3920 e_pkg.slot_atom != highest_version.slot_atom:
3921 # There is a higher version available in a
3922 # different slot, so this existing node is
3926 matched_packages.append(e_pkg)
3927 existing_node = e_pkg
3929 # Compare built package to current config and
3930 # reject the built package if necessary.
3931 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
3932 ("--newuse" in self._frozen_config.myopts or \
3933 "--reinstall" in self._frozen_config.myopts or \
3934 "--binpkg-respect-use" in self._frozen_config.myopts):
3935 iuses = pkg.iuse.all
3936 old_use = self._pkg_use_enabled(pkg)
3938 pkgsettings.setcpv(myeb)
3940 pkgsettings.setcpv(pkg)
3941 now_use = pkgsettings["PORTAGE_USE"].split()
3942 forced_flags = set()
3943 forced_flags.update(pkgsettings.useforce)
3944 forced_flags.update(pkgsettings.usemask)
3946 if myeb and not usepkgonly and not useoldpkg:
3947 cur_iuse = myeb.iuse.all
3948 if self._reinstall_for_flags(forced_flags,
3952 # Compare current config to installed package
3953 # and do not reinstall if possible.
3954 if not installed and not useoldpkg and \
3955 ("--newuse" in self._frozen_config.myopts or \
3956 "--reinstall" in self._frozen_config.myopts) and \
3957 cpv in vardb.match(atom):
3958 forced_flags = set()
3959 forced_flags.update(pkg.use.force)
3960 forced_flags.update(pkg.use.mask)
3961 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3962 old_use = inst_pkg.use.enabled
3963 old_iuse = inst_pkg.iuse.all
3964 cur_use = self._pkg_use_enabled(pkg)
3965 cur_iuse = pkg.iuse.all
3966 reinstall_for_flags = \
3967 self._reinstall_for_flags(
3968 forced_flags, old_use, old_iuse,
3970 if reinstall_for_flags:
3972 if reinstall_atoms.findAtomForPackage(pkg, \
3973 modified_use=self._pkg_use_enabled(pkg)):
3978 matched_oldpkg.append(pkg)
3979 matched_packages.append(pkg)
3980 if reinstall_for_flags:
3981 self._dynamic_config._reinstall_nodes[pkg] = \
3985 if not matched_packages:
3988 if "--debug" in self._frozen_config.myopts:
3989 for pkg in matched_packages:
3990 portage.writemsg("%s %s%s%s\n" % \
3991 ((pkg.type_name + ":").rjust(10),
3992 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
3994 # Filter out any old-style virtual matches if they are
3995 # mixed with new-style virtual matches.
3997 if len(matched_packages) > 1 and \
3998 "virtual" == portage.catsplit(cp)[0]:
3999 for pkg in matched_packages:
4002 # Got a new-style virtual, so filter
4003 # out any old-style virtuals.
4004 matched_packages = [pkg for pkg in matched_packages \
4008 if existing_node is not None and \
4009 existing_node in matched_packages:
4010 return existing_node, existing_node
4012 if len(matched_packages) > 1:
4013 if rebuilt_binaries:
4017 for pkg in matched_packages:
4023 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4025 if built_pkg is not None and inst_pkg is not None:
4026 # Only reinstall if binary package BUILD_TIME is
4027 # non-empty, in order to avoid cases like to
4028 # bug #306659 where BUILD_TIME fields are missing
4029 # in local and/or remote Packages file.
4031 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
4032 except (KeyError, ValueError):
4036 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
4037 except (KeyError, ValueError):
4038 installed_timestamp = 0
4040 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4042 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4043 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4044 if built_timestamp and \
4045 built_timestamp > installed_timestamp and \
4046 built_timestamp >= minimal_timestamp:
4047 return built_pkg, existing_node
4049 #Don't care if the binary has an older BUILD_TIME than the installed
4050 #package. This is for closely tracking a binhost.
4051 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4053 if built_timestamp and \
4054 built_timestamp != installed_timestamp:
4055 return built_pkg, existing_node
4057 for pkg in matched_packages:
4058 if pkg.installed and pkg.invalid:
4059 matched_packages = [x for x in \
4060 matched_packages if x is not pkg]
4063 for pkg in matched_packages:
4064 if pkg.installed and self._pkg_visibility_check(pkg, \
4065 allow_unstable_keywords=allow_unstable_keywords,
4066 allow_license_changes=allow_license_changes,
4067 allow_unmasks=allow_unmasks):
4068 return pkg, existing_node
4070 visible_matches = []
4072 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4073 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
4074 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
4075 if not visible_matches:
4076 visible_matches = [pkg.cpv for pkg in matched_packages \
4077 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
4078 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
4080 bestmatch = portage.best(visible_matches)
4082 # all are masked, so ignore visibility
4083 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4084 matched_packages = [pkg for pkg in matched_packages \
4085 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4087 # ordered by type preference ("ebuild" type is the last resort)
4088 return matched_packages[-1], existing_node
4090 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4092 Select packages that have already been added to the graph or
4093 those that are installed and have not been scheduled for
4096 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4097 matches = graph_db.match_pkgs(atom)
4100 pkg = matches[-1] # highest match
4101 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4102 return pkg, in_graph
4104 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4106 Select packages that are installed.
4108 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
4112 if len(matches) > 1:
4113 matches.reverse() # ascending order
4114 unmasked = [pkg for pkg in matches if \
4115 self._pkg_visibility_check(pkg)]
4117 if len(unmasked) == 1:
4120 # Account for packages with masks (like KEYWORDS masks)
4121 # that are usually ignored in visibility checks for
4122 # installed packages, in order to handle cases like
4124 unmasked = [pkg for pkg in matches if not pkg.masks]
4127 pkg = matches[-1] # highest match
4128 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4129 return pkg, in_graph
def _complete_graph(self, required_sets=None):
    """
    Add any deep dependencies of required sets (args, system, world) that
    have not been pulled into the graph yet. This ensures that the graph
    is consistent such that initially satisfied deep dependencies are not
    broken in the new graph. Initially unsatisfied dependencies are
    irrelevant since we only want to avoid breaking dependencies that are
    initially satisfied.

    Since this method can consume enough time to disturb users, it is
    currently only enabled by the --complete-graph option.

    @param required_sets: contains required sets (currently only used
        for depclean and prune removal operations)
    @type required_sets: dict
    """
    # NOTE(review): this excerpt appears to have lines elided (loop
    # exits, else branches and return statements are missing where the
    # original line numbering jumps); verify the control flow against
    # the complete upstream file before relying on it.
    if "--buildpkgonly" in self._frozen_config.myopts or \
        "recurse" not in self._dynamic_config.myparams:

    if "complete" not in self._dynamic_config.myparams:
        # Automatically enable complete mode if there are any
        # downgrades, since they often break dependencies
        # (like in bug #353613).
        have_downgrade = False
        for node in self._dynamic_config.digraph:
            if not isinstance(node, Package) or \
                node.operation != "merge":
            vardb = self._frozen_config.roots[
                node.root].trees["vartree"].dbapi
            inst_pkg = vardb.match_pkgs(node.slot_atom)
            if inst_pkg and inst_pkg[0] > node:
                have_downgrade = True

            self._dynamic_config.myparams["complete"] = True
            # Skip complete graph mode, in order to avoid consuming
            # enough time to disturb users.

    # Put the depgraph into a mode that causes it to only
    # select packages that have already been added to the
    # graph or those that are installed and have not been
    # scheduled for replacement. Also, toggle the "deep"
    # parameter so that all dependencies are traversed and
    # accounted for.
    self._select_atoms = self._select_atoms_from_graph
    if "remove" in self._dynamic_config.myparams:
        # Removal operations only consider installed packages.
        self._select_package = self._select_pkg_from_installed
        self._select_package = self._select_pkg_from_graph
        self._dynamic_config._traverse_ignored_deps = True
    already_deep = self._dynamic_config.myparams.get("deep") is True
    if not already_deep:
        self._dynamic_config.myparams["deep"] = True

    # Invalidate the package selection cache, since
    # _select_package has just changed implementations.
    for trees in self._dynamic_config._filtered_trees.values():
        trees["porttree"].dbapi._clear_cache()

    args = self._dynamic_config._initial_arg_list[:]
    for root in self._frozen_config.roots:
        if root != self._frozen_config.target_root and \
            "remove" in self._dynamic_config.myparams:
            # Only pull in deps for the relevant root.
        depgraph_sets = self._dynamic_config.sets[root]
        required_set_names = self._frozen_config._required_set_names.copy()
        remaining_args = required_set_names.copy()
        if required_sets is None or root not in required_sets:
            # Removal actions may override sets with temporary
            # replacements that have had atoms removed in order
            # to implement --deselect behavior.
            required_set_names = set(required_sets[root])
            depgraph_sets.sets.clear()
            depgraph_sets.sets.update(required_sets[root])
        if "remove" not in self._dynamic_config.myparams and \
            root == self._frozen_config.target_root and \
            remaining_args.difference_update(depgraph_sets.sets)
        if not remaining_args and \
            not self._dynamic_config._ignored_deps and \
            not self._dynamic_config._dep_stack:
        root_config = self._frozen_config.roots[root]
        for s in required_set_names:
            pset = depgraph_sets.sets.get(s)
                pset = root_config.sets[s]
            atom = SETPREFIX + s
            args.append(SetArg(arg=atom, pset=pset,
                root_config=root_config))

    self._set_args(args)
    for arg in self._expand_set_args(args, add_to_digraph=True):
        for atom in arg.pset.getAtoms():
            self._dynamic_config._dep_stack.append(
                Dependency(atom=atom, root=arg.root_config.root,

    if self._dynamic_config._ignored_deps:
        self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
        self._dynamic_config._ignored_deps = []
    if not self._create_graph(allow_unsatisfied=True):

    # Check the unsatisfied deps to see if any initially satisfied deps
    # will become unsatisfied due to an upgrade. Initially unsatisfied
    # deps are irrelevant since we only want to avoid breaking deps
    # that are initially satisfied.
    while self._dynamic_config._unsatisfied_deps:
        dep = self._dynamic_config._unsatisfied_deps.pop()
        vardb = self._frozen_config.roots[
            dep.root].trees["vartree"].dbapi
        matches = vardb.match_pkgs(dep.atom)
            self._dynamic_config._initially_unsatisfied_deps.append(dep)
        # An scheduled installation broke a deep dependency.
        # Add the installed package to the graph so that it
        # will be appropriately reported as a slot collision
        # (possibly solvable via backtracking).
        pkg = matches[-1] # highest match
        if not self._add_pkg(pkg, dep):
        if not self._create_graph(allow_unsatisfied=True):
def _pkg(self, cpv, type_name, root_config, installed=False,
    onlydeps=False, myrepo = None):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises PackageNotFound from aux_get if it
    fails for some reason (package does not exist or is
    corrupt).

    @param cpv: package cpv string
    @param type_name: a key of self.pkg_tree_map (e.g. "ebuild")
    @param root_config: the relevant RootConfig instance
    @param installed: True when an installed package is requested
    @param onlydeps: True for packages pulled in only for their deps
    @param myrepo: optional repository name for disambiguation
    @rtype: Package
    """

    # Ensure that we use the specially optimized RootConfig instance
    # that refers to FakeVartree instead of the real vartree.
    root_config = self._frozen_config.roots[root_config.root]
    pkg = self._frozen_config._pkg_cache.get(
        Package._gen_hash_key(cpv=cpv, type_name=type_name,
        repo_name=myrepo, root_config=root_config,
        installed=installed, onlydeps=onlydeps))
    if pkg is None and onlydeps and not installed:
        # Maybe it already got pulled in as a "merge" node.
        pkg = self._dynamic_config.mydbapi[root_config.root].get(
            Package._gen_hash_key(cpv=cpv, type_name=type_name,
            repo_name=myrepo, root_config=root_config,
            installed=installed, onlydeps=False))

    if pkg is None:
        tree_type = self.pkg_tree_map[type_name]
        db = root_config.trees[tree_type].dbapi
        db_keys = list(self._frozen_config._trees_orig[root_config.root][
            tree_type].dbapi._aux_cache_keys)
        try:
            metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
        except KeyError:
            raise portage.exception.PackageNotFound(cpv)
        pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
            installed=installed, metadata=metadata, onlydeps=onlydeps,
            root_config=root_config, type_name=type_name)
        self._frozen_config._pkg_cache[pkg] = pkg

        # Track the highest package per slot that is masked solely by
        # its LICENSE, so a useful ACCEPT_LICENSE hint can be shown.
        if not self._pkg_visibility_check(pkg) and \
            'LICENSE' in pkg.masks and len(pkg.masks) == 1:
            slot_key = (pkg.root, pkg.slot_atom)
            other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
            if other_pkg is None or pkg > other_pkg:
                self._frozen_config._highest_license_masked[slot_key] = pkg

    return pkg
def _validate_blockers(self):
    """Remove any blockers from the digraph that do not match any of the
    packages within the graph. If necessary, create hard deps to ensure
    correct merge order such that mutually blocking packages are never
    installed simultaneously. Also add runtime blockers from all installed
    packages if any of them haven't been added already (bug 128809)."""
    # NOTE(review): this excerpt appears to have lines elided (loop
    # headers such as "for pkg in vardb:", try/except lines, and many
    # continue/return statements are missing where the original line
    # numbering jumps); verify against the complete upstream file
    # before relying on the control flow shown here.

    if "--buildpkgonly" in self._frozen_config.myopts or \
        "--nodeps" in self._frozen_config.myopts:

    complete = "complete" in self._dynamic_config.myparams
    deep = "deep" in self._dynamic_config.myparams

    # Pull in blockers from all installed packages that haven't already
    # been pulled into the depgraph, in order to ensure that they are
    # respected (bug 128809). Due to the performance penalty that is
    # incurred by all the additional dep_check calls that are required,
    # blockers returned from dep_check are cached on disk by the
    # BlockerCache class.

    # For installed packages, always ignore blockers from DEPEND since
    # only runtime dependencies should be relevant for packages that
    # are already built.
    dep_keys = ["RDEPEND", "PDEPEND"]
    for myroot in self._frozen_config.trees:
        vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
        portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
        pkgsettings = self._frozen_config.pkgsettings[myroot]
        root_config = self._frozen_config.roots[myroot]
        dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
        final_db = self._dynamic_config.mydbapi[myroot]

        blocker_cache = BlockerCache(myroot, vardb)
        stale_cache = set(blocker_cache)
            # Entries for packages that still exist are removed from
            # stale_cache; leftovers are purged from the cache below.
            stale_cache.discard(cpv)
            pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
                pkg in self._dynamic_config._traversed_pkg_deps

            # Check for masked installed packages. Only warn about
            # packages that are in the graph in order to avoid warning
            # about those that will be automatically uninstalled during
            # the merge process or by --depclean. Always warn about
            # packages masked by license, since the user likely wants
            # to adjust ACCEPT_LICENSE.
            if not self._pkg_visibility_check(pkg) and \
                (pkg_in_graph or 'LICENSE' in pkg.masks):
                self._dynamic_config._masked_installed.add(pkg)
                self._check_masks(pkg)

            blocker_atoms = None
                    self._dynamic_config._blocker_parents.child_nodes(pkg))
                    self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
                # Select just the runtime blockers.
                blockers = [blocker for blocker in blockers \
                    if blocker.priority.runtime or \
                    blocker.priority.runtime_post]
            if blockers is not None:
                blockers = set(blocker.atom for blocker in blockers)

            # If this node has any blockers, create a "nomerge"
            # node for it so that they can be enforced.
            self._spinner_update()
            blocker_data = blocker_cache.get(cpv)
            # A cache entry is only valid while the installed
            # package's COUNTER is unchanged.
            if blocker_data is not None and \
                blocker_data.counter != long(pkg.metadata["COUNTER"]):

            # If blocker data from the graph is available, use
            # it to validate the cache and update the cache if
            # it seems invalid.
            if blocker_data is not None and \
                blockers is not None:
                if not blockers.symmetric_difference(
                    blocker_data.atoms):

            if blocker_data is None and \
                blockers is not None:
                # Re-use the blockers from the graph.
                blocker_atoms = sorted(blockers)
                counter = long(pkg.metadata["COUNTER"])
                    blocker_cache.BlockerData(counter, blocker_atoms)
                blocker_cache[pkg.cpv] = blocker_data

                blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
                # Use aux_get() to trigger FakeVartree global
                # updates on *DEPEND when appropriate.
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                # It is crucial to pass in final_db here in order to
                # optimize dep_check calls by eliminating atoms via
                # dep_wordreduce and dep_eval calls.
                    success, atoms = portage.dep_check(depstr,
                        final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
                        trees=self._dynamic_config._graph_trees, myroot=myroot)
                except Exception as e:
                    # This is helpful, for example, if a ValueError
                    # is thrown from cpv_expand due to multiple
                    # matches (this can happen if an atom lacks a
                    # category).
                    show_invalid_depstring_notice(
                        pkg, depstr, str(e))

                    replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
                    if replacement_pkg and \
                        replacement_pkg[0].operation == "merge":
                        # This package is being replaced anyway, so
                        # ignore invalid dependencies so as not to
                        # annoy the user too much (otherwise they'd be
                        # forced to manually unmerge it first).
                    show_invalid_depstring_notice(pkg, depstr, atoms)

                blocker_atoms = [myatom for myatom in atoms \
                blocker_atoms.sort()
                counter = long(pkg.metadata["COUNTER"])
                blocker_cache[cpv] = \
                    blocker_cache.BlockerData(counter, blocker_atoms)
                for atom in blocker_atoms:
                    blocker = Blocker(atom=atom,
                        eapi=pkg.metadata["EAPI"],
                        priority=self._priority(runtime=True),
                    self._dynamic_config._blocker_parents.add(blocker, pkg)
            except portage.exception.InvalidAtom as e:
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                show_invalid_depstring_notice(
                    pkg, depstr, "Invalid Atom: %s" % (e,))
        # Purge entries for packages that no longer exist.
        for cpv in stale_cache:
            del blocker_cache[cpv]
        blocker_cache.flush()

    # Discard any "uninstall" tasks scheduled by previous calls
    # to this method, since those tasks may not make sense given
    # the current graph state.
    previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
    if previous_uninstall_tasks:
        self._dynamic_config._blocker_uninstalls = digraph()
        self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)

    for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
        self._spinner_update()
        root_config = self._frozen_config.roots[blocker.root]
        virtuals = root_config.settings.getvirtuals()
        myroot = blocker.root
        initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
        final_db = self._dynamic_config.mydbapi[myroot]

        provider_virtual = False
        if blocker.cp in virtuals and \
            not self._have_new_virt(blocker.root, blocker.cp):
            provider_virtual = True

        # Use this to check PROVIDE for each matched package
        # when necessary.
        atom_set = InternalPackageSet(
            initial_atoms=[blocker.atom])

        if provider_virtual:
            # Expand the blocker atom across every provider of the
            # old-style virtual.
            for provider_entry in virtuals[blocker.cp]:
                atoms.append(Atom(blocker.atom.replace(
                    blocker.cp, provider_entry.cp, 1)))
            atoms = [blocker.atom]

        blocked_initial = set()
            for pkg in initial_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
                    blocked_initial.add(pkg)

        blocked_final = set()
            for pkg in final_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
                    blocked_final.add(pkg)

        if not blocked_initial and not blocked_final:
            parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
            self._dynamic_config._blocker_parents.remove(blocker)
            # Discard any parents that don't have any more blockers.
            for pkg in parent_pkgs:
                self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
                if not self._dynamic_config._blocker_parents.child_nodes(pkg):
                    self._dynamic_config._blocker_parents.remove(pkg)
        for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
            unresolved_blocks = False
            depends_on_order = set()
            for pkg in blocked_initial:
                if pkg.slot_atom == parent.slot_atom and \
                    not blocker.atom.blocker.overlap.forbid:
                    # New !!atom blockers do not allow temporary
                    # simulaneous installation, so unlike !atom
                    # blockers, !!atom blockers aren't ignored
                    # when they match other packages occupying
                    # the same slot.
                if parent.installed:
                    # Two currently installed packages conflict with
                    # eachother. Ignore this case since the damage
                    # is already done and this would be likely to
                    # confuse users if displayed like a normal blocker.

                self._dynamic_config._blocked_pkgs.add(pkg, blocker)

                if parent.operation == "merge":
                    # Maybe the blocked package can be replaced or simply
                    # unmerged to resolve this block.
                    depends_on_order.add((pkg, parent))
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True
            for pkg in blocked_final:
                if pkg.slot_atom == parent.slot_atom and \
                    not blocker.atom.blocker.overlap.forbid:
                    # New !!atom blockers do not allow temporary
                    # simulaneous installation, so unlike !atom
                    # blockers, !!atom blockers aren't ignored
                    # when they match other packages occupying
                    # the same slot.
                if parent.operation == "nomerge" and \
                    pkg.operation == "nomerge":
                    # This blocker will be handled the next time that a
                    # merge of either package is triggered.

                self._dynamic_config._blocked_pkgs.add(pkg, blocker)

                # Maybe the blocking package can be
                # unmerged to resolve this block.
                if parent.operation == "merge" and pkg.installed:
                    depends_on_order.add((pkg, parent))
                elif parent.operation == "nomerge":
                    depends_on_order.add((parent, pkg))
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True

            # Make sure we don't unmerge any package that have been pulled
            # into the graph.
            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    if self._dynamic_config.digraph.contains(inst_pkg) and \
                        self._dynamic_config.digraph.parent_nodes(inst_pkg):
                        unresolved_blocks = True

            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    uninst_task = Package(built=inst_pkg.built,
                        cpv=inst_pkg.cpv, installed=inst_pkg.installed,
                        metadata=inst_pkg.metadata,
                        operation="uninstall",
                        root_config=inst_pkg.root_config,
                        type_name=inst_pkg.type_name)
                    # Enforce correct merge order with a hard dep.
                    self._dynamic_config.digraph.addnode(uninst_task, inst_task,
                        priority=BlockerDepPriority.instance)
                    # Count references to this blocker so that it can be
                    # invalidated after nodes referencing it have been
                    # merged.
                    self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
            if not unresolved_blocks and not depends_on_order:
                self._dynamic_config._irrelevant_blockers.add(blocker, parent)
                self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
                if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
                    self._dynamic_config._blocker_parents.remove(blocker)
                if not self._dynamic_config._blocker_parents.child_nodes(parent):
                    self._dynamic_config._blocker_parents.remove(parent)
            if unresolved_blocks:
                self._dynamic_config._unsolvable_blockers.add(blocker, parent)
4631 def _accept_blocker_conflicts(self):
4633 for x in ("--buildpkgonly", "--fetchonly",
4634 "--fetch-all-uri", "--nodeps"):
4635 if x in self._frozen_config.myopts:
def _merge_order_bias(self, mygraph):
    """
    For optimal leaf node selection, promote deep system runtime deps and
    order nodes from highest to lowest overall reference count.

    @param mygraph: digraph whose "order" list is sorted in place
    """

    node_info = {}
    for node in mygraph.order:
        node_info[node] = len(mygraph.parent_nodes(node))
    deep_system_deps = _find_deep_system_runtime_deps(mygraph)

    def cmp_merge_preference(node1, node2):
        # Push uninstall operations toward the end of the list.
        if node1.operation == 'uninstall':
            if node2.operation == 'uninstall':
                return 0
            return 1

        if node2.operation == 'uninstall':
            if node1.operation == 'uninstall':
                return 0
            return -1

        # Deep system runtime deps come first.
        node1_sys = node1 in deep_system_deps
        node2_sys = node2 in deep_system_deps
        if node1_sys != node2_sys:
            if node1_sys:
                return -1
            return 1

        # Otherwise, higher reference count sorts earlier.
        return node_info[node2] - node_info[node1]

    mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
    """
    Return a copy of the serialized merge list, resolving conflicts
    and serializing tasks on first use (the result is cached in
    self._dynamic_config._serialized_tasks_cache).

    @param reversed: when True, return the list in reverse order
        (note: this parameter name shadows the builtin, but it is
        preserved for API compatibility with existing callers)
    """

    while self._dynamic_config._serialized_tasks_cache is None:
        self._resolve_conflicts()
        try:
            self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
                self._serialize_tasks()
        except self._serialize_tasks_retry:
            pass

    retlist = self._dynamic_config._serialized_tasks_cache[:]
    if reversed:
        retlist.reverse()
    return retlist
def _implicit_libc_deps(self, mergelist, graph):
    """
    Create implicit dependencies on libc, in order to ensure that libc
    is installed as early as possible (see bug #303567).
    """
    # NOTE(review): this excerpt appears to have lines elided (the
    # libc_pkgs initialization, several continue statements and an
    # else branch are missing where the original line numbering
    # jumps); verify against the complete upstream file.
    implicit_libc_roots = (self._frozen_config._running_root.root,)
    for root in implicit_libc_roots:
        graphdb = self._dynamic_config.mydbapi[root]
        vardb = self._frozen_config.trees[root]["vartree"].dbapi
        # Expand the virtual libc atom into concrete provider atoms.
        for atom in self._expand_virt_from_graph(root,
            portage.const.LIBC_PACKAGE_ATOM):
            match = graphdb.match_pkgs(atom)
            # Collect libc packages that are scheduled to be merged
            # and are not already installed.
            if pkg.operation == "merge" and \
                not vardb.cpv_exists(pkg.cpv):
                libc_pkgs.setdefault(pkg.root, set()).add(pkg)

    earlier_libc_pkgs = set()

    for pkg in mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        root_libc_pkgs = libc_pkgs.get(pkg.root)
        if root_libc_pkgs is not None and \
            pkg.operation == "merge":
            if pkg in root_libc_pkgs:
                earlier_libc_pkgs.add(pkg)
                # presumably: for non-libc merges, add build-time deps
                # on libc packages already seen earlier in the list —
                # TODO confirm against upstream (an else branch appears
                # to be elided here).
                for libc_pkg in root_libc_pkgs:
                    if libc_pkg in earlier_libc_pkgs:
                        graph.add(libc_pkg, pkg,
                            priority=DepPriority(buildtime=True))
def schedulerGraph(self):
    """
    The scheduler graph is identical to the normal one except that
    uninstall edges are reversed in specific cases that require
    conflicting packages to be temporarily installed simultaneously.
    This is intended for use by the Scheduler in its parallelization
    logic. It ensures that temporary simultaneous installation of
    conflicting packages is avoided when appropriate (especially for
    !!atom blockers), but allowed in specific cases that require it.

    Note that this method calls break_refs() which alters the state of
    internal Package instances such that this depgraph instance should
    not be used to perform any more calculations.
    """

    # NOTE: altlist initializes self._dynamic_config._scheduler_graph
    mergelist = self.altlist()
    self._implicit_libc_deps(mergelist,
        self._dynamic_config._scheduler_graph)

    # Break DepPriority.satisfied attributes which reference
    # installed Package instances.
    for parents, children, node in \
        self._dynamic_config._scheduler_graph.nodes.values():
        for priorities in chain(parents.values(), children.values()):
            for priority in priorities:
                if priority.satisfied:
                    # Replace the Package reference with a plain True
                    # so the graph does not keep the instance alive.
                    priority.satisfied = True

    # Prune the package cache down to what the scheduler needs:
    # packages in the graph, plus installed packages.
    pkg_cache = self._frozen_config._pkg_cache
    graph = self._dynamic_config._scheduler_graph
    trees = self._frozen_config.trees
    pruned_pkg_cache = {}
    for key, pkg in pkg_cache.items():
        if pkg in graph or \
            (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
            pruned_pkg_cache[key] = pkg

    for root in trees:
        trees[root]['vartree']._pkg_cache = pruned_pkg_cache

    self.break_refs()
    sched_config = \
        _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)

    return sched_config
def break_refs(self):
    """
    Break any references in Package instances that lead back to the depgraph.
    This is useful if you want to hold references to packages without also
    holding the depgraph on the heap. It should only be called after the
    depgraph and _frozen_config will not be used for any more calculations.
    """
    for root_config in self._frozen_config.roots.values():
        # Copy state from the original (real vartree) RootConfig into
        # the optimized one, making the two instances identical.
        root_config.update(self._frozen_config._trees_orig[
            root_config.root]["root_config"])
        # Both instances are now identical, so discard the
        # original which should have no other references.
        self._frozen_config._trees_orig[
            root_config.root]["root_config"] = root_config
4793 def _resolve_conflicts(self):
4794 if not self._complete_graph():
4795 raise self._unknown_internal_error()
4797 if not self._validate_blockers():
4798 self._dynamic_config._skip_restart = True
4799 raise self._unknown_internal_error()
4801 if self._dynamic_config._slot_collision_info:
4802 self._process_slot_conflicts()
4804 def _serialize_tasks(self):
4806 debug = "--debug" in self._frozen_config.myopts
4809 writemsg("\ndigraph:\n\n", noiselevel=-1)
4810 self._dynamic_config.digraph.debug_print()
4811 writemsg("\n", noiselevel=-1)
4813 scheduler_graph = self._dynamic_config.digraph.copy()
4815 if '--nodeps' in self._frozen_config.myopts:
4816 # Preserve the package order given on the command line.
4817 return ([node for node in scheduler_graph \
4818 if isinstance(node, Package) \
4819 and node.operation == 'merge'], scheduler_graph)
4821 mygraph=self._dynamic_config.digraph.copy()
4823 removed_nodes = set()
4825 # Prune off all DependencyArg instances since they aren't
4826 # needed, and because of nested sets this is faster than doing
4827 # it with multiple digraph.root_nodes() calls below. This also
4828 # takes care of nested sets that have circular references,
4829 # which wouldn't be matched by digraph.root_nodes().
4830 for node in mygraph:
4831 if isinstance(node, DependencyArg):
4832 removed_nodes.add(node)
4834 mygraph.difference_update(removed_nodes)
4835 removed_nodes.clear()
4837 # Prune "nomerge" root nodes if nothing depends on them, since
4838 # otherwise they slow down merge order calculation. Don't remove
4839 # non-root nodes since they help optimize merge order in some cases
4840 # such as revdep-rebuild.
4843 for node in mygraph.root_nodes():
4844 if not isinstance(node, Package) or \
4845 node.installed or node.onlydeps:
4846 removed_nodes.add(node)
4848 self._spinner_update()
4849 mygraph.difference_update(removed_nodes)
4850 if not removed_nodes:
4852 removed_nodes.clear()
4853 self._merge_order_bias(mygraph)
4854 def cmp_circular_bias(n1, n2):
4856 RDEPEND is stronger than PDEPEND and this function
4857 measures such a strength bias within a circular
4858 dependency relationship.
4860 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4861 ignore_priority=priority_range.ignore_medium_soft)
4862 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4863 ignore_priority=priority_range.ignore_medium_soft)
4864 if n1_n2_medium == n2_n1_medium:
4869 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
4871 # Contains uninstall tasks that have been scheduled to
4872 # occur after overlapping blockers have been installed.
4873 scheduled_uninstalls = set()
4874 # Contains any Uninstall tasks that have been ignored
4875 # in order to avoid the circular deps code path. These
4876 # correspond to blocker conflicts that could not be
4878 ignored_uninstall_tasks = set()
4879 have_uninstall_task = False
4880 complete = "complete" in self._dynamic_config.myparams
4883 def get_nodes(**kwargs):
4885 Returns leaf nodes excluding Uninstall instances
4886 since those should be executed as late as possible.
4888 return [node for node in mygraph.leaf_nodes(**kwargs) \
4889 if isinstance(node, Package) and \
4890 (node.operation != "uninstall" or \
4891 node in scheduled_uninstalls)]
4893 # sys-apps/portage needs special treatment if ROOT="/"
4894 running_root = self._frozen_config._running_root.root
4895 runtime_deps = InternalPackageSet(
4896 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4897 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
4898 PORTAGE_PACKAGE_ATOM)
4899 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
4900 PORTAGE_PACKAGE_ATOM)
4903 running_portage = running_portage[0]
4905 running_portage = None
4907 if replacement_portage:
4908 replacement_portage = replacement_portage[0]
4910 replacement_portage = None
4912 if replacement_portage == running_portage:
4913 replacement_portage = None
4915 if replacement_portage is not None and \
4916 (running_portage is None or \
4917 running_portage.cpv != replacement_portage.cpv or \
4918 '9999' in replacement_portage.cpv or \
4919 'git' in replacement_portage.inherited or \
4920 'git-2' in replacement_portage.inherited):
4921 # update from running_portage to replacement_portage asap
4922 asap_nodes.append(replacement_portage)
4924 if running_portage is not None:
4926 portage_rdepend = self._select_atoms_highest_available(
4927 running_root, running_portage.metadata["RDEPEND"],
4928 myuse=self._pkg_use_enabled(running_portage),
4929 parent=running_portage, strict=False)
4930 except portage.exception.InvalidDependString as e:
4931 portage.writemsg("!!! Invalid RDEPEND in " + \
4932 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4933 (running_root, running_portage.cpv, e), noiselevel=-1)
4935 portage_rdepend = {running_portage : []}
4936 for atoms in portage_rdepend.values():
4937 runtime_deps.update(atom for atom in atoms \
4938 if not atom.blocker)
4940 # Merge libc asap, in order to account for implicit
4941 # dependencies. See bug #303567.
4942 implicit_libc_roots = (running_root,)
4943 for root in implicit_libc_roots:
4945 vardb = self._frozen_config.trees[root]["vartree"].dbapi
4946 graphdb = self._dynamic_config.mydbapi[root]
4947 for atom in self._expand_virt_from_graph(root,
4948 portage.const.LIBC_PACKAGE_ATOM):
4951 match = graphdb.match_pkgs(atom)
4955 if pkg.operation == "merge" and \
4956 not vardb.cpv_exists(pkg.cpv):
4960 # If there's also an os-headers upgrade, we need to
4961 # pull that in first. See bug #328317.
4962 for atom in self._expand_virt_from_graph(root,
4963 portage.const.OS_HEADERS_PACKAGE_ATOM):
4966 match = graphdb.match_pkgs(atom)
4970 if pkg.operation == "merge" and \
4971 not vardb.cpv_exists(pkg.cpv):
4972 asap_nodes.append(pkg)
4974 asap_nodes.extend(libc_pkgs)
4976 def gather_deps(ignore_priority, mergeable_nodes,
4977 selected_nodes, node):
4979 Recursively gather a group of nodes that RDEPEND on
4980 each other. This ensures that they are merged as a group
4981 and get their RDEPENDs satisfied as soon as possible.
4983 if node in selected_nodes:
4985 if node not in mergeable_nodes:
4987 if node == replacement_portage and \
4988 mygraph.child_nodes(node,
4989 ignore_priority=priority_range.ignore_medium_soft):
4990 # Make sure that portage always has all of its
4991 # RDEPENDs installed first.
4993 selected_nodes.add(node)
4994 for child in mygraph.child_nodes(node,
4995 ignore_priority=ignore_priority):
4996 if not gather_deps(ignore_priority,
4997 mergeable_nodes, selected_nodes, child):
5001 def ignore_uninst_or_med(priority):
5002 if priority is BlockerDepPriority.instance:
5004 return priority_range.ignore_medium(priority)
5006 def ignore_uninst_or_med_soft(priority):
5007 if priority is BlockerDepPriority.instance:
5009 return priority_range.ignore_medium_soft(priority)
5011 tree_mode = "--tree" in self._frozen_config.myopts
5012 # Tracks whether or not the current iteration should prefer asap_nodes
5013 # if available. This is set to False when the previous iteration
5014 # failed to select any nodes. It is reset whenever nodes are
5015 # successfully selected.
5018 # Controls whether or not the current iteration should drop edges that
5019 # are "satisfied" by installed packages, in order to solve circular
5020 # dependencies. The deep runtime dependencies of installed packages are
5021 # not checked in this case (bug #199856), so it must be avoided
5022 # whenever possible.
5023 drop_satisfied = False
5025 # State of variables for successive iterations that loosen the
5026 # criteria for node selection.
5028 # iteration prefer_asap drop_satisfied
5033 # If no nodes are selected on the last iteration, it is due to
5034 # unresolved blockers or circular dependencies.
5037 self._spinner_update()
5038 selected_nodes = None
5039 ignore_priority = None
5040 if drop_satisfied or (prefer_asap and asap_nodes):
5041 priority_range = DepPrioritySatisfiedRange
5043 priority_range = DepPriorityNormalRange
5044 if prefer_asap and asap_nodes:
5045 # ASAP nodes are merged before their soft deps. Go ahead and
5046 # select root nodes here if necessary, since it's typical for
5047 # the parent to have been removed from the graph already.
5048 asap_nodes = [node for node in asap_nodes \
5049 if mygraph.contains(node)]
5050 for i in range(priority_range.SOFT,
5051 priority_range.MEDIUM_SOFT + 1):
5052 ignore_priority = priority_range.ignore_priority[i]
5053 for node in asap_nodes:
5054 if not mygraph.child_nodes(node,
5055 ignore_priority=ignore_priority):
5056 selected_nodes = [node]
5057 asap_nodes.remove(node)
5062 if not selected_nodes and \
5063 not (prefer_asap and asap_nodes):
5064 for i in range(priority_range.NONE,
5065 priority_range.MEDIUM_SOFT + 1):
5066 ignore_priority = priority_range.ignore_priority[i]
5067 nodes = get_nodes(ignore_priority=ignore_priority)
5069 # If there is a mixture of merges and uninstalls,
5070 # do the uninstalls first.
5071 good_uninstalls = None
5073 good_uninstalls = []
5075 if node.operation == "uninstall":
5076 good_uninstalls.append(node)
5079 nodes = good_uninstalls
5083 if good_uninstalls or len(nodes) == 1 or \
5084 (ignore_priority is None and \
5085 not asap_nodes and not tree_mode):
5086 # Greedily pop all of these nodes since no
5087 # relationship has been ignored. This optimization
5088 # destroys --tree output, so it's disabled in tree
5090 selected_nodes = nodes
5092 # For optimal merge order:
5093 # * Only pop one node.
5094 # * Removing a root node (node without a parent)
5095 # will not produce a leaf node, so avoid it.
5096 # * It's normal for a selected uninstall to be a
5097 # root node, so don't check them for parents.
5099 prefer_asap_parents = (True, False)
5101 prefer_asap_parents = (False,)
5102 for check_asap_parent in prefer_asap_parents:
5103 if check_asap_parent:
5105 parents = mygraph.parent_nodes(node,
5106 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5107 if parents and set(parents).intersection(asap_nodes):
5108 selected_nodes = [node]
5112 if mygraph.parent_nodes(node):
5113 selected_nodes = [node]
5120 if not selected_nodes:
5121 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
5123 mergeable_nodes = set(nodes)
5124 if prefer_asap and asap_nodes:
5126 # When gathering the nodes belonging to a runtime cycle,
5127 # we want to minimize the number of nodes gathered, since
5128 # this tends to produce a more optimal merge order.
5129 # Ignoring all medium_soft deps serves this purpose.
5130 # In the case of multiple runtime cycles, where some cycles
5131 # may depend on smaller independent cycles, it's optimal
5132 # to merge smaller independent cycles before other cycles
5133 # that depend on them. Therefore, we search for the
5134 # smallest cycle in order to try and identify and prefer
5135 # these smaller independent cycles.
5136 ignore_priority = priority_range.ignore_medium_soft
5137 smallest_cycle = None
5139 if not mygraph.parent_nodes(node):
5141 selected_nodes = set()
5142 if gather_deps(ignore_priority,
5143 mergeable_nodes, selected_nodes, node):
5144 # When selecting asap_nodes, we need to ensure
5145 # that we haven't selected a large runtime cycle
5146 # that is obviously sub-optimal. This will be
5147 # obvious if any of the non-asap selected_nodes
5148 # is a leaf node when medium_soft deps are
5150 if prefer_asap and asap_nodes and \
5151 len(selected_nodes) > 1:
5152 for node in selected_nodes.difference(
5154 if not mygraph.child_nodes(node,
5156 DepPriorityNormalRange.ignore_medium_soft):
5157 selected_nodes = None
5160 if smallest_cycle is None or \
5161 len(selected_nodes) < len(smallest_cycle):
5162 smallest_cycle = selected_nodes
5164 selected_nodes = smallest_cycle
5166 if selected_nodes and debug:
5167 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
5168 (len(selected_nodes),), noiselevel=-1)
5169 cycle_digraph = mygraph.copy()
5170 cycle_digraph.difference_update([x for x in
5171 cycle_digraph if x not in selected_nodes])
5172 cycle_digraph.debug_print()
5173 writemsg("\n", noiselevel=-1)
5175 if prefer_asap and asap_nodes and not selected_nodes:
5176 # We failed to find any asap nodes to merge, so ignore
5177 # them for the next iteration.
5181 if selected_nodes and ignore_priority is not None:
5182 # Try to merge ignored medium_soft deps as soon as possible
5183 # if they're not satisfied by installed packages.
5184 for node in selected_nodes:
5185 children = set(mygraph.child_nodes(node))
5186 soft = children.difference(
5187 mygraph.child_nodes(node,
5188 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
5189 medium_soft = children.difference(
5190 mygraph.child_nodes(node,
5192 DepPrioritySatisfiedRange.ignore_medium_soft))
5193 medium_soft.difference_update(soft)
5194 for child in medium_soft:
5195 if child in selected_nodes:
5197 if child in asap_nodes:
5199 # Merge PDEPEND asap for bug #180045.
5200 asap_nodes.append(child)
5202 if selected_nodes and len(selected_nodes) > 1:
5203 if not isinstance(selected_nodes, list):
5204 selected_nodes = list(selected_nodes)
5205 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
5207 if not selected_nodes and myblocker_uninstalls:
5208 # An Uninstall task needs to be executed in order to
5209 # avoid conflict if possible.
5212 priority_range = DepPrioritySatisfiedRange
5214 priority_range = DepPriorityNormalRange
5216 mergeable_nodes = get_nodes(
5217 ignore_priority=ignore_uninst_or_med)
5219 min_parent_deps = None
5222 for task in myblocker_uninstalls.leaf_nodes():
5223 # Do some sanity checks so that system or world packages
5224 # don't get uninstalled inappropriately here (only really
5225 # necessary when --complete-graph has not been enabled).
5227 if task in ignored_uninstall_tasks:
5230 if task in scheduled_uninstalls:
5231 # It's been scheduled but it hasn't
5232 # been executed yet due to dependence
5233 # on installation of blocking packages.
5236 root_config = self._frozen_config.roots[task.root]
5237 inst_pkg = self._pkg(task.cpv, "installed", root_config,
5240 if self._dynamic_config.digraph.contains(inst_pkg):
5243 forbid_overlap = False
5244 heuristic_overlap = False
5245 for blocker in myblocker_uninstalls.parent_nodes(task):
5246 if not eapi_has_strong_blocks(blocker.eapi):
5247 heuristic_overlap = True
5248 elif blocker.atom.blocker.overlap.forbid:
5249 forbid_overlap = True
5251 if forbid_overlap and running_root == task.root:
5254 if heuristic_overlap and running_root == task.root:
5255 # Never uninstall sys-apps/portage or its essential
5256 # dependencies, except through replacement.
5258 runtime_dep_atoms = \
5259 list(runtime_deps.iterAtomsForPackage(task))
5260 except portage.exception.InvalidDependString as e:
5261 portage.writemsg("!!! Invalid PROVIDE in " + \
5262 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5263 (task.root, task.cpv, e), noiselevel=-1)
5267 # Don't uninstall a runtime dep if it appears
5268 # to be the only suitable one installed.
5270 vardb = root_config.trees["vartree"].dbapi
5271 for atom in runtime_dep_atoms:
5272 other_version = None
5273 for pkg in vardb.match_pkgs(atom):
5274 if pkg.cpv == task.cpv and \
5275 pkg.metadata["COUNTER"] == \
5276 task.metadata["COUNTER"]:
5280 if other_version is None:
5286 # For packages in the system set, don't take
5287 # any chances. If the conflict can't be resolved
5288 # by a normal replacement operation then abort.
5291 for atom in root_config.sets[
5292 "system"].iterAtomsForPackage(task):
5295 except portage.exception.InvalidDependString as e:
5296 portage.writemsg("!!! Invalid PROVIDE in " + \
5297 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5298 (task.root, task.cpv, e), noiselevel=-1)
5304 # Note that the world check isn't always
5305 # necessary since self._complete_graph() will
5306 # add all packages from the system and world sets to the
5307 # graph. This just allows unresolved conflicts to be
5308 # detected as early as possible, which makes it possible
5309 # to avoid calling self._complete_graph() when it is
5310 # unnecessary due to blockers triggering an abortion.
5312 # For packages in the world set, go ahead and uninstall
5313 # when necessary, as long as the atom will be satisfied
5314 # in the final state.
5315 graph_db = self._dynamic_config.mydbapi[task.root]
5318 for atom in root_config.sets[
5319 "selected"].iterAtomsForPackage(task):
5321 for pkg in graph_db.match_pkgs(atom):
5328 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
5330 except portage.exception.InvalidDependString as e:
5331 portage.writemsg("!!! Invalid PROVIDE in " + \
5332 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5333 (task.root, task.cpv, e), noiselevel=-1)
5339 # Check the deps of parent nodes to ensure that
5340 # the chosen task produces a leaf node. Maybe
5341 # this can be optimized some more to make the
5342 # best possible choice, but the current algorithm
5343 # is simple and should be near optimal for most
5345 self._spinner_update()
5346 mergeable_parent = False
5348 parent_deps.add(task)
5349 for parent in mygraph.parent_nodes(task):
5350 parent_deps.update(mygraph.child_nodes(parent,
5351 ignore_priority=priority_range.ignore_medium_soft))
5352 if min_parent_deps is not None and \
5353 len(parent_deps) >= min_parent_deps:
5354 # This task is no better than a previously selected
5355 # task, so abort search now in order to avoid wasting
5356 # any more cpu time on this task. This increases
5357 # performance dramatically in cases when there are
5358 # hundreds of blockers to solve, like when
5359 # upgrading to a new slot of kde-meta.
5360 mergeable_parent = None
5362 if parent in mergeable_nodes and \
5363 gather_deps(ignore_uninst_or_med_soft,
5364 mergeable_nodes, set(), parent):
5365 mergeable_parent = True
5367 if not mergeable_parent:
5370 if min_parent_deps is None or \
5371 len(parent_deps) < min_parent_deps:
5372 min_parent_deps = len(parent_deps)
5375 if uninst_task is not None and min_parent_deps == 1:
5376 # This is the best possible result, so abort search
5377 # now in order to avoid wasting any more cpu time.
5380 if uninst_task is not None:
5381 # The uninstall is performed only after blocking
5382 # packages have been merged on top of it. File
5383 # collisions between blocking packages are detected
5384 # and removed from the list of files to be uninstalled.
5385 scheduled_uninstalls.add(uninst_task)
5386 parent_nodes = mygraph.parent_nodes(uninst_task)
5388 # Reverse the parent -> uninstall edges since we want
5389 # to do the uninstall after blocking packages have
5390 # been merged on top of it.
5391 mygraph.remove(uninst_task)
5392 for blocked_pkg in parent_nodes:
5393 mygraph.add(blocked_pkg, uninst_task,
5394 priority=BlockerDepPriority.instance)
5395 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5396 scheduler_graph.add(blocked_pkg, uninst_task,
5397 priority=BlockerDepPriority.instance)
5399 # Sometimes a merge node will render an uninstall
5400 # node unnecessary (due to occupying the same SLOT),
5401 # and we want to avoid executing a separate uninstall
5402 # task in that case.
5403 slot_node = self._dynamic_config.mydbapi[uninst_task.root
5404 ].match_pkgs(uninst_task.slot_atom)
5406 slot_node[0].operation == "merge":
5407 mygraph.add(slot_node[0], uninst_task,
5408 priority=BlockerDepPriority.instance)
5410 # Reset the state variables for leaf node selection and
5411 # continue trying to select leaf nodes.
5413 drop_satisfied = False
5416 if not selected_nodes:
5417 # Only select root nodes as a last resort. This case should
5418 # only trigger when the graph is nearly empty and the only
5419 # remaining nodes are isolated (no parents or children). Since
5420 # the nodes must be isolated, ignore_priority is not needed.
5421 selected_nodes = get_nodes()
5423 if not selected_nodes and not drop_satisfied:
5424 drop_satisfied = True
5427 if not selected_nodes and myblocker_uninstalls:
5428 # If possible, drop an uninstall task here in order to avoid
5429 # the circular deps code path. The corresponding blocker will
5430 # still be counted as an unresolved conflict.
5432 for node in myblocker_uninstalls.leaf_nodes():
5434 mygraph.remove(node)
5439 ignored_uninstall_tasks.add(node)
5442 if uninst_task is not None:
5443 # Reset the state variables for leaf node selection and
5444 # continue trying to select leaf nodes.
5446 drop_satisfied = False
5449 if not selected_nodes:
5450 self._dynamic_config._circular_deps_for_display = mygraph
5451 self._dynamic_config._skip_restart = True
5452 raise self._unknown_internal_error()
5454 # At this point, we've succeeded in selecting one or more nodes, so
5455 # reset state variables for leaf node selection.
5457 drop_satisfied = False
5459 mygraph.difference_update(selected_nodes)
5461 for node in selected_nodes:
5462 if isinstance(node, Package) and \
5463 node.operation == "nomerge":
5466 # Handle interactions between blockers
5467 # and uninstallation tasks.
5468 solved_blockers = set()
5470 if isinstance(node, Package) and \
5471 "uninstall" == node.operation:
5472 have_uninstall_task = True
5475 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
5476 inst_pkg = vardb.match_pkgs(node.slot_atom)
5478 # The package will be replaced by this one, so remove
5479 # the corresponding Uninstall task if necessary.
5480 inst_pkg = inst_pkg[0]
5481 uninst_task = Package(built=inst_pkg.built,
5482 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5483 metadata=inst_pkg.metadata,
5484 operation="uninstall",
5485 root_config=inst_pkg.root_config,
5486 type_name=inst_pkg.type_name)
5488 mygraph.remove(uninst_task)
5492 if uninst_task is not None and \
5493 uninst_task not in ignored_uninstall_tasks and \
5494 myblocker_uninstalls.contains(uninst_task):
5495 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
5496 myblocker_uninstalls.remove(uninst_task)
5497 # Discard any blockers that this Uninstall solves.
5498 for blocker in blocker_nodes:
5499 if not myblocker_uninstalls.child_nodes(blocker):
5500 myblocker_uninstalls.remove(blocker)
5502 self._dynamic_config._unsolvable_blockers:
5503 solved_blockers.add(blocker)
5505 retlist.append(node)
5507 if (isinstance(node, Package) and \
5508 "uninstall" == node.operation) or \
5509 (uninst_task is not None and \
5510 uninst_task in scheduled_uninstalls):
5511 # Include satisfied blockers in the merge list
5512 # since the user might be interested and also
5513 # it serves as an indicator that blocking packages
5514 # will be temporarily installed simultaneously.
5515 for blocker in solved_blockers:
5516 retlist.append(blocker)
5518 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
5519 for node in myblocker_uninstalls.root_nodes():
5520 unsolvable_blockers.add(node)
5522 # If any Uninstall tasks need to be executed in order
5523 # to avoid a conflict, complete the graph with any
5524 # dependencies that may have been initially
5525 # neglected (to ensure that unsafe Uninstall tasks
5526 # are properly identified and blocked from execution).
5527 if have_uninstall_task and \
5529 not unsolvable_blockers:
5530 self._dynamic_config.myparams["complete"] = True
5531 if '--debug' in self._frozen_config.myopts:
5533 msg.append("enabling 'complete' depgraph mode " + \
5534 "due to uninstall task(s):")
5536 for node in retlist:
5537 if isinstance(node, Package) and \
5538 node.operation == 'uninstall':
5539 msg.append("\t%s" % (node,))
5540 writemsg_level("\n%s\n" % \
5541 "".join("%s\n" % line for line in msg),
5542 level=logging.DEBUG, noiselevel=-1)
5543 raise self._serialize_tasks_retry("")
5545 # Set satisfied state on blockers, but not before the
5546 # above retry path, since we don't want to modify the
5547 # state in that case.
5548 for node in retlist:
5549 if isinstance(node, Blocker):
5550 node.satisfied = True
5552 for blocker in unsolvable_blockers:
5553 retlist.append(blocker)
5555 if unsolvable_blockers and \
5556 not self._accept_blocker_conflicts():
5557 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
5558 self._dynamic_config._serialized_tasks_cache = retlist[:]
5559 self._dynamic_config._scheduler_graph = scheduler_graph
5560 self._dynamic_config._skip_restart = True
5561 raise self._unknown_internal_error()
5563 if self._dynamic_config._slot_collision_info and \
5564 not self._accept_blocker_conflicts():
5565 self._dynamic_config._serialized_tasks_cache = retlist[:]
5566 self._dynamic_config._scheduler_graph = scheduler_graph
5567 raise self._unknown_internal_error()
5569 return retlist, scheduler_graph
5571 def _show_circular_deps(self, mygraph):
5572 self._dynamic_config._circular_dependency_handler = \
5573 circular_dependency_handler(self, mygraph)
5574 handler = self._dynamic_config._circular_dependency_handler
5576 self._frozen_config.myopts.pop("--quiet", None)
5577 self._frozen_config.myopts["--verbose"] = True
5578 self._frozen_config.myopts["--tree"] = True
5579 portage.writemsg("\n\n", noiselevel=-1)
5580 self.display(handler.merge_list)
5581 prefix = colorize("BAD", " * ")
5582 portage.writemsg("\n", noiselevel=-1)
5583 portage.writemsg(prefix + "Error: circular dependencies:\n",
5585 portage.writemsg("\n", noiselevel=-1)
5587 if handler.circular_dep_message is None:
5588 handler.debug_print()
5589 portage.writemsg("\n", noiselevel=-1)
5591 if handler.circular_dep_message is not None:
5592 portage.writemsg(handler.circular_dep_message, noiselevel=-1)
5594 suggestions = handler.suggestions
5596 writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
5597 if len(suggestions) == 1:
5598 writemsg("by applying the following change:\n", noiselevel=-1)
5600 writemsg("by applying " + colorize("bold", "any of") + \
5601 " the following changes:\n", noiselevel=-1)
5602 writemsg("".join(suggestions), noiselevel=-1)
5603 writemsg("\nNote that this change can be reverted, once the package has" + \
5604 " been installed.\n", noiselevel=-1)
5605 if handler.large_cycle_count:
5606 writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
5607 "Several changes might be required to resolve all cycles.\n" + \
5608 "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
5610 writemsg("\n\n", noiselevel=-1)
5611 writemsg(prefix + "Note that circular dependencies " + \
5612 "can often be avoided by temporarily\n", noiselevel=-1)
5613 writemsg(prefix + "disabling USE flags that trigger " + \
5614 "optional dependencies.\n", noiselevel=-1)
5616 def _show_merge_list(self):
# Display the cached serialized task list, but only if that exact
# list (in either forward or reversed order) has not already been
# displayed, so the same merge list is never printed twice.
5617 if self._dynamic_config._serialized_tasks_cache is not None and \
5618 not (self._dynamic_config._displayed_list is not None and \
5619 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
5620 self._dynamic_config._displayed_list == \
5621 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
# Copy the cache so the displayed list can be reversed without
# mutating the cached task order.
5622 display_list = self._dynamic_config._serialized_tasks_cache[:]
5623 if "--tree" in self._frozen_config.myopts:
# --tree output is read bottom-up, so reverse the merge order.
5624 display_list.reverse()
5625 self.display(display_list)
5627 def _show_unsatisfied_blockers(self, blockers):
5628 self._show_merge_list()
5629 msg = "Error: The above package list contains " + \
5630 "packages which cannot be installed " + \
5631 "at the same time on the same system."
5632 prefix = colorize("BAD", " * ")
5633 portage.writemsg("\n", noiselevel=-1)
5634 for line in textwrap.wrap(msg, 70):
5635 portage.writemsg(prefix + line + "\n", noiselevel=-1)
5637 # Display the conflicting packages along with the packages
5638 # that pulled them in. This is helpful for troubleshooting
5639 # cases in which blockers don't solve automatically and
5640 # the reasons are not apparent from the normal merge list
5644 for blocker in blockers:
5645 for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
5646 self._dynamic_config._blocker_parents.parent_nodes(blocker)):
5647 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
5648 if not parent_atoms:
5649 atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
5650 if atom is not None:
5651 parent_atoms = set([("@selected", atom)])
5653 conflict_pkgs[pkg] = parent_atoms
5656 # Reduce noise by pruning packages that are only
5657 # pulled in by other conflict packages.
5659 for pkg, parent_atoms in conflict_pkgs.items():
5660 relevant_parent = False
5661 for parent, atom in parent_atoms:
5662 if parent not in conflict_pkgs:
5663 relevant_parent = True
5665 if not relevant_parent:
5666 pruned_pkgs.add(pkg)
5667 for pkg in pruned_pkgs:
5668 del conflict_pkgs[pkg]
5674 for pkg, parent_atoms in conflict_pkgs.items():
5676 # Prefer packages that are not directly involved in a conflict.
5677 # It can be essential to see all the packages here, so don't
5678 # omit any. If the list is long, people can simply use a pager.
5679 preferred_parents = set()
5680 for parent_atom in parent_atoms:
5681 parent, atom = parent_atom
5682 if parent not in conflict_pkgs:
5683 preferred_parents.add(parent_atom)
5685 ordered_list = list(preferred_parents)
5686 if len(parent_atoms) > len(ordered_list):
5687 for parent_atom in parent_atoms:
5688 if parent_atom not in preferred_parents:
5689 ordered_list.append(parent_atom)
5691 msg.append(indent + "%s pulled in by\n" % pkg)
5693 for parent_atom in ordered_list:
5694 parent, atom = parent_atom
5695 msg.append(2*indent)
5696 if isinstance(parent,
5697 (PackageArg, AtomArg)):
5698 # For PackageArg and AtomArg types, it's
5699 # redundant to display the atom attribute.
5700 msg.append(str(parent))
5702 # Display the specific atom from SetArg or
5704 msg.append("%s required by %s" % (atom, parent))
5709 writemsg("".join(msg), noiselevel=-1)
5711 if "--quiet" not in self._frozen_config.myopts:
5712 show_blocker_docs_link()
5714 def display(self, mylist, favorites=[], verbosity=None):
5716 # This is used to prevent display_problems() from
5717 # redundantly displaying this exact same merge list
5718 # again via _show_merge_list().
5719 self._dynamic_config._displayed_list = mylist
5722 return display(self, mylist, favorites, verbosity)
5724 def _display_autounmask(self):
5726 Display --autounmask message and optionally write it to config files
5727 (using CONFIG_PROTECT). The message includes the comments and the changes.
5730 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
5731 autounmask_unrestricted_atoms = \
5732 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
5733 quiet = "--quiet" in self._frozen_config.myopts
5734 pretend = "--pretend" in self._frozen_config.myopts
5735 ask = "--ask" in self._frozen_config.myopts
5736 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
5738 def check_if_latest(pkg):
5740 is_latest_in_slot = True
5741 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
5742 root_config = self._frozen_config.roots[pkg.root]
5744 for db, pkg_type, built, installed, db_keys in dbs:
5745 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
5746 if other_pkg.cp != pkg.cp:
5747 # old-style PROVIDE virtual means there are no
5748 # normal matches for this pkg_type
5752 if other_pkg.slot_atom == pkg.slot_atom:
5753 is_latest_in_slot = False
5756 # iter_match_pkgs yields highest version first, so
5757 # there's no need to search this pkg_type any further
5760 if not is_latest_in_slot:
5763 return is_latest, is_latest_in_slot
5765 #Set of roots we have autounmask changes for.
5768 unstable_keyword_msg = {}
5769 for pkg in self._dynamic_config._needed_unstable_keywords:
5770 self._show_merge_list()
5771 if pkg in self._dynamic_config.digraph:
5774 unstable_keyword_msg.setdefault(root, [])
5775 is_latest, is_latest_in_slot = check_if_latest(pkg)
5776 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5777 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5778 use=self._pkg_use_enabled(pkg))
5779 for reason in mreasons:
5780 if reason.unmask_hint and \
5781 reason.unmask_hint.key == 'unstable keyword':
5782 keyword = reason.unmask_hint.value
5784 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
5785 if autounmask_unrestricted_atoms:
5787 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
5788 elif is_latest_in_slot:
5789 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
5791 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
5793 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
5795 p_mask_change_msg = {}
5796 for pkg in self._dynamic_config._needed_p_mask_changes:
5797 self._show_merge_list()
5798 if pkg in self._dynamic_config.digraph:
5801 p_mask_change_msg.setdefault(root, [])
5802 is_latest, is_latest_in_slot = check_if_latest(pkg)
5803 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5804 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5805 use=self._pkg_use_enabled(pkg))
5806 for reason in mreasons:
5807 if reason.unmask_hint and \
5808 reason.unmask_hint.key == 'p_mask':
5809 keyword = reason.unmask_hint.value
5811 comment, filename = portage.getmaskingreason(
5812 pkg.cpv, metadata=pkg.metadata,
5813 settings=pkgsettings,
5814 portdb=pkg.root_config.trees["porttree"].dbapi,
5815 return_location=True)
5817 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
5819 p_mask_change_msg[root].append("# %s:\n" % filename)
5821 comment = [line for line in
5822 comment.splitlines() if line]
5823 for line in comment:
5824 p_mask_change_msg[root].append("%s\n" % line)
5825 if autounmask_unrestricted_atoms:
5827 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
5828 elif is_latest_in_slot:
5829 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
5831 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
5833 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
5835 use_changes_msg = {}
5836 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
5837 self._show_merge_list()
5838 if pkg in self._dynamic_config.digraph:
5841 use_changes_msg.setdefault(root, [])
5842 is_latest, is_latest_in_slot = check_if_latest(pkg)
5843 changes = needed_use_config_change[1]
5845 for flag, state in changes.items():
5847 adjustments.append(flag)
5849 adjustments.append("-" + flag)
5850 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
5851 if autounmask_unrestricted_atoms:
5853 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5854 elif is_latest_in_slot:
5855 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
5857 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5859 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5862 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
5863 self._show_merge_list()
5864 if pkg in self._dynamic_config.digraph:
5867 license_msg.setdefault(root, [])
5868 is_latest, is_latest_in_slot = check_if_latest(pkg)
5870 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
5871 if autounmask_unrestricted_atoms:
5873 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
5874 elif is_latest_in_slot:
5875 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
5877 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
5879 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
def find_config_file(abs_user_config, file_name):
	# NOTE(review): this excerpt appears truncated -- the docstring
	# delimiters, the try: opener, the traversal stack initialization
	# and the os.stat() call are not visible here. Comments below only
	# describe what is shown.
	Searches /etc/portage for an appropriate file to append changes to.
	If the file_name is a file it is returned, if it is a directory, the
	last file in it is returned. Order of traversal is the identical to
	portage.util.grablines(recursive=True).
	file_name - String containing a file name like "package.use"
	return value - String. Absolute path of file to write to. None if
	no suitable file exists.
	file_path = os.path.join(abs_user_config, file_name)
	except OSError as e:
		if e.errno == errno.ENOENT:
			# The file doesn't exist, so we'll
		# Disk or file system trouble?
	last_file_path = None
	if stat.S_ISREG(st.st_mode):
	elif stat.S_ISDIR(st.st_mode):
		# Skip VCS bookkeeping directories (CVS, .svn, ...).
		if os.path.basename(p) in _ignorecvs_dirs:
		contents = os.listdir(p)
		# Reverse-sorted so the last regular file found matches the
		# grablines(recursive=True) traversal order.
		contents.sort(reverse=True)
		for child in contents:
			# Ignore hidden files and editor backup files.
			if child.startswith(".") or \
				child.endswith("~"):
			stack.append(os.path.join(p, child))
	return last_file_path
5933 write_to_file = autounmask_write and not pretend
5934 #Make sure we have a file to write to before doing any write.
5935 file_to_write_to = {}
5939 settings = self._frozen_config.roots[root].settings
5940 abs_user_config = os.path.join(
5941 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
5943 if root in unstable_keyword_msg:
5944 if not os.path.exists(os.path.join(abs_user_config,
5945 "package.keywords")):
5946 filename = "package.accept_keywords"
5948 filename = "package.keywords"
5949 file_to_write_to[(abs_user_config, "package.keywords")] = \
5950 find_config_file(abs_user_config, filename)
5952 if root in p_mask_change_msg:
5953 file_to_write_to[(abs_user_config, "package.unmask")] = \
5954 find_config_file(abs_user_config, "package.unmask")
5956 if root in use_changes_msg:
5957 file_to_write_to[(abs_user_config, "package.use")] = \
5958 find_config_file(abs_user_config, "package.use")
5960 if root in license_msg:
5961 file_to_write_to[(abs_user_config, "package.license")] = \
5962 find_config_file(abs_user_config, "package.license")
5964 for (abs_user_config, f), path in file_to_write_to.items():
5966 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
5968 write_to_file = not problems
5971 settings = self._frozen_config.roots[root].settings
5972 abs_user_config = os.path.join(
5973 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
5976 writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
5978 if root in unstable_keyword_msg:
5979 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
5980 " are necessary to proceed:\n", noiselevel=-1)
5981 writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
5983 if root in p_mask_change_msg:
5984 writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
5985 " are necessary to proceed:\n", noiselevel=-1)
5986 writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
5988 if root in use_changes_msg:
5989 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
5990 " are necessary to proceed:\n", noiselevel=-1)
5991 writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
5993 if root in license_msg:
5994 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
5995 " are necessary to proceed:\n", noiselevel=-1)
5996 writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
6001 settings = self._frozen_config.roots[root].settings
6002 protect_obj[root] = ConfigProtect(settings["EROOT"], \
6003 shlex_split(settings.get("CONFIG_PROTECT", "")),
6004 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
def write_changes(root, changes, file_to_write_to):
	# Append the pending autounmask change lines for `root` to the
	# given config file, honoring CONFIG_PROTECT. Read failures other
	# than ENOENT, and any write failure, are recorded in `problems`.
	# NOTE(review): excerpt appears truncated -- the try: openers and
	# the ENOENT branch body are not visible here.
	file_contents = None
		file_contents = io.open(
			_unicode_encode(file_to_write_to,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'],
			errors='replace').readlines()
	except IOError as e:
		if e.errno == errno.ENOENT:
		problems.append("!!! Failed to read '%s': %s\n" % \
			(file_to_write_to, e))
	if file_contents is not None:
		file_contents.extend(changes)
		if protect_obj[root].isprotected(file_to_write_to):
			# We want to force new_protect_filename to ensure
			# that the user will see all our changes via
			# etc-update, even if file_to_write_to doesn't
			# exist yet, so we specify force=True.
			file_to_write_to = new_protect_filename(file_to_write_to,
			write_atomic(file_to_write_to, "".join(file_contents))
		except PortageException:
			problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
6035 (unstable_keyword_msg or \
6036 p_mask_change_msg or \
6037 use_changes_msg or \
6041 "NOTE: This --autounmask behavior can be disabled by setting",
6042 " EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
6046 line = colorize("INFORM", line)
6047 writemsg_stdout(line + "\n", noiselevel=-1)
6049 if ask and write_to_file and file_to_write_to:
6050 prompt = "\nWould you like to add these " + \
6051 "changes to your config files?"
6052 if userquery(prompt, enter_invalid) == 'No':
6053 write_to_file = False
6055 if write_to_file and file_to_write_to:
6057 settings = self._frozen_config.roots[root].settings
6058 abs_user_config = os.path.join(
6059 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6060 ensure_dirs(abs_user_config)
6062 if root in unstable_keyword_msg:
6063 write_changes(root, unstable_keyword_msg[root],
6064 file_to_write_to.get((abs_user_config, "package.keywords")))
6066 if root in p_mask_change_msg:
6067 write_changes(root, p_mask_change_msg[root],
6068 file_to_write_to.get((abs_user_config, "package.unmask")))
6070 if root in use_changes_msg:
6071 write_changes(root, use_changes_msg[root],
6072 file_to_write_to.get((abs_user_config, "package.use")))
6074 if root in license_msg:
6075 write_changes(root, license_msg[root],
6076 file_to_write_to.get((abs_user_config, "package.license")))
6079 writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
6081 writemsg_stdout("".join(problems), noiselevel=-1)
6082 elif write_to_file and roots:
6083 writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
6085 elif not pretend and not autounmask_write and roots:
6086 writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.

	All output goes to stderr, except for unsatisfied dependencies which
	go to stdout for parsing by programs such as autounmask.
	"""
	# Note that show_masked_packages() sends its output to
	# stdout, and some programs such as autounmask parse the
	# output in cases when emerge bails out. However, when
	# show_masked_packages() is called for installed packages
	# here, the message is a warning that is more appropriate
	# to send to stderr, so temporarily redirect stdout to
	# stderr. TODO: Fix output code so there's a cleaner way
	# to redirect everything to stderr.
	# NOTE(review): the try/finally that restores sys.stdout is not
	# visible in this truncated excerpt -- confirm it exists upstream.
	sys.stdout = sys.stderr
	self._display_problems()
	# This goes to stdout for parsing by programs like autounmask.
	for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
	# Emit accumulated graph problems -- circular deps, unsatisfied
	# blockers, slot conflicts, missed updates, autounmask changes,
	# world-file issues, package.provided conflicts and masked
	# packages -- mostly to stderr.
	# NOTE(review): excerpt appears truncated; several else:/continue
	# branches and some writemsg noiselevel arguments are missing.
	if self._dynamic_config._circular_deps_for_display is not None:
		self._show_circular_deps(
			self._dynamic_config._circular_deps_for_display)
	# The user is only notified of a slot conflict if
	# there are no unresolvable blocker conflicts.
	if self._dynamic_config._unsatisfied_blockers_for_display is not None:
		self._show_unsatisfied_blockers(
			self._dynamic_config._unsatisfied_blockers_for_display)
	elif self._dynamic_config._slot_collision_info:
		self._show_slot_collision_notice()
	self._show_missed_update()
	self._display_autounmask()
	# TODO: Add generic support for "set problem" handlers so that
	# the below warnings aren't special cases for world only.
	if self._dynamic_config._missing_args:
		world_problems = False
		if "world" in self._dynamic_config.sets[
			self._frozen_config.target_root].sets:
			# Filter out indirect members of world (from nested sets)
			# since only direct members of world are desired here.
			world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
			for arg, atom in self._dynamic_config._missing_args:
				if arg.name in ("selected", "world") and atom in world_set:
					world_problems = True
		sys.stderr.write("\n!!! Problems have been " + \
			"detected with your world file\n")
		sys.stderr.write("!!! Please run " + \
			green("emaint --check world")+"\n\n")
	if self._dynamic_config._missing_args:
		sys.stderr.write("\n" + colorize("BAD", "!!!") + \
			" Ebuilds for the following packages are either all\n")
		sys.stderr.write(colorize("BAD", "!!!") + \
			" masked or don't exist:\n")
		sys.stderr.write(" ".join(str(atom) for arg, atom in \
			self._dynamic_config._missing_args) + "\n")
	if self._dynamic_config._pprovided_args:
		# Group the offending (arg, atom) pairs with the parents that
		# pulled them in, so the report can name the responsible sets.
		for arg, atom in self._dynamic_config._pprovided_args:
			if isinstance(arg, SetArg):
				arg_atom = (atom, atom)
				arg_atom = (arg.arg, atom)
			refs = arg_refs.setdefault(arg_atom, [])
			if parent not in refs:
		msg.append(bad("\nWARNING: "))
		if len(self._dynamic_config._pprovided_args) > 1:
			msg.append("Requested packages will not be " + \
				"merged because they are listed in\n")
			msg.append("A requested package will not be " + \
				"merged because it is listed in\n")
		msg.append("package.provided:\n\n")
		problems_sets = set()
		for (arg, atom), refs in arg_refs.items():
			problems_sets.update(refs)
			ref_string = ", ".join(["'%s'" % name for name in refs])
			ref_string = " pulled in by " + ref_string
			msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
		if "selected" in problems_sets or "world" in problems_sets:
			msg.append("This problem can be solved in one of the following ways:\n\n")
			msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
			msg.append(" B) Uninstall offending packages (cleans them from world).\n")
			msg.append(" C) Remove offending entries from package.provided.\n\n")
			msg.append("The best course of action depends on the reason that an offending\n")
			msg.append("package.provided entry exists.\n\n")
		sys.stderr.write("".join(msg))
	masked_packages = []
	for pkg in self._dynamic_config._masked_license_updates:
		root_config = pkg.root_config
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
		mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.repo, pkg.metadata, mreasons))
	writemsg("\n" + colorize("BAD", "!!!") + \
		" The following updates are masked by LICENSE changes:\n",
	show_masked_packages(masked_packages)
	writemsg("\n", noiselevel=-1)
	masked_packages = []
	for pkg in self._dynamic_config._masked_installed:
		root_config = pkg.root_config
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
		# NOTE(review): unlike the LICENSE loop above, this passes the
		# bound method itself (self._pkg_use_enabled) instead of calling
		# it with pkg -- confirm this is intended and not a missing call.
		mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.repo, pkg.metadata, mreasons))
	writemsg("\n" + colorize("BAD", "!!!") + \
		" The following installed packages are masked:\n",
	show_masked_packages(masked_packages)
	writemsg("\n", noiselevel=-1)
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# NOTE(review): excerpt appears truncated -- return/continue lines,
	# the world_set.lock()/unlock() calls, the try: openers and the
	# all_added initialization are not visible here.
	# Options under which the world file must not be modified.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self._frozen_config.myopts:
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	world_set = root_config.sets["selected"]
	world_locked = False
	if hasattr(world_set, "lock"):
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	args_set = self._dynamic_config.sets[
		self._frozen_config.target_root].sets['__non_set_args__']
	portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
	added_favorites = set()
	# Collect world atoms for argument packages that are not merged.
	for x in self._dynamic_config._set_nodes:
		if x.operation != "nomerge":
		if x.root != root_config.root:
			myfavkey = create_world_atom(x, args_set, root_config)
			if myfavkey in added_favorites:
				added_favorites.add(myfavkey)
		except portage.exception.InvalidDependString as e:
			writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
				(x.cpv, e), noiselevel=-1)
			writemsg("!!! see '%s'\n\n" % os.path.join(
				x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
	# Record nested set arguments (@set names) that qualify as world
	# candidates, then write everything to the world file.
	for arg in self._dynamic_config._initial_arg_list:
		if not isinstance(arg, SetArg):
		if arg.root_config.root != root_config.root:
		if k in ("selected", "world") or \
			not root_config.sets[k].world_candidate:
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
		">>> Recording %s in \"world\" favorites file...\n" % \
		colorize("INFORM", str(a)), noiselevel=-1)
	world_set.update(all_added)
def _loadResumeCommand(self, resume_data, skip_masked=True,
	# NOTE(review): the signature continues on a line not visible in
	# this truncated excerpt, and the docstring delimiters, try: openers,
	# return statements and several else:/continue branches are missing.
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.
	# Validate the structure of the saved resume data before using it.
	if not isinstance(resume_data, dict):
	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):
	favorites = resume_data.get("favorites")
	args_set = self._dynamic_config.sets[
		self._frozen_config.target_root].sets['__non_set_args__']
	if isinstance(favorites, list):
		args = self._load_favorites(favorites)
	fakedb = self._dynamic_config.mydbapi
	trees = self._frozen_config.trees
	serialized_tasks = []
	# Each mergelist entry is a 4-tuple: (pkg_type, root, cpv, action).
	if not (isinstance(x, list) and len(x) == 4):
	pkg_type, myroot, pkg_key, action = x
	if pkg_type not in self.pkg_tree_map:
	if action != "merge":
	root_config = self._frozen_config.roots[myroot]
	# Use the resume "favorites" list to see if a repo was specified
	depgraph_sets = self._dynamic_config.sets[root_config.root]
	for atom in depgraph_sets.atoms.getAtoms():
		if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
	atom = "=" + pkg_key
	atom = atom + _repo_separator + repo
	atom = Atom(atom, allow_repo=True)
	# Select a visible, non-excluded package matching the atom.
	for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
		if not self._pkg_visibility_check(pkg) or \
			self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
			modified_use=self._pkg_use_enabled(pkg)):
	# It does not exist or it is corrupt.
	# TODO: log these somewhere
	raise portage.exception.PackageNotFound(pkg_key)
	if "merge" == pkg.operation and \
		self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
		modified_use=self._pkg_use_enabled(pkg)):
	if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
		masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
		self._dynamic_config._unsatisfied_deps_for_display.append(
			((pkg.root, "="+pkg.cpv), {"myparent":None}))
	fakedb[myroot].cpv_inject(pkg)
	serialized_tasks.append(pkg)
	self._spinner_update()
	if self._dynamic_config._unsatisfied_deps_for_display:
	if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
		self._dynamic_config._serialized_tasks_cache = serialized_tasks
		self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
		self._select_package = self._select_pkg_from_graph
		self._dynamic_config.myparams["selective"] = True
		# Always traverse deep dependencies in order to account for
		# potentially unsatisfied dependencies of installed packages.
		# This is necessary for correct --keep-going or --resume operation
		# in case a package from a group of circularly dependent packages
		# fails. In this case, a package which has recently been installed
		# may have an unsatisfied circular dependency (pulled in by
		# PDEPEND, for example). So, even though a package is already
		# installed, it may not have all of it's dependencies satisfied, so
		# it may not be usable. If such a package is in the subgraph of
		# deep depenedencies of a scheduled build, that build needs to
		# be cancelled. In order for this type of situation to be
		# recognized, deep traversal of dependencies is required.
		self._dynamic_config.myparams["deep"] = True
		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):
		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
		for arg in self._expand_set_args(args, add_to_digraph=True):
			for atom in arg.pset.getAtoms():
				pkg, existing_node = self._select_package(
					arg.root_config.root, atom)
				if existing_node is None and \
					if not self._add_pkg(pkg, Dependency(atom=atom,
						root=pkg.root, parent=arg)):
		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		if not self._create_graph(allow_unsatisfied=True):
		unsatisfied_deps = []
		for dep in self._dynamic_config._unsatisfied_deps:
			if not isinstance(dep.parent, Package):
			if dep.parent.operation == "merge":
				unsatisfied_deps.append(dep)
		# For unsatisfied deps of installed packages, only account for
		# them if they are in the subgraph of dependencies of a package
		# which is scheduled to be installed.
		unsatisfied_install = False
		dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
		# Walk ancestors of the unsatisfied dep looking for a
		# scheduled merge; cycle-safe via the traversed set.
		node = dep_stack.pop()
		if not isinstance(node, Package):
		if node.operation == "merge":
			unsatisfied_install = True
		if node in traversed:
		dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
		if unsatisfied_install:
			unsatisfied_deps.append(dep)
		if masked_tasks or unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + unsatisfied_deps)
		self._dynamic_config._serialized_tasks_cache = None
		except self._unknown_internal_error:
def _load_favorites(self, favorites):
	# NOTE(review): excerpt appears truncated -- docstring delimiters,
	# the args initialization, the loop header over favorites, the
	# continue statements, try: openers and the return are not visible.
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	sets = root_config.sets
	depgraph_sets = self._dynamic_config.sets[root_config.root]
	# Skip non-string entries; "system"/"world" are handled elsewhere.
	if not isinstance(x, basestring):
	if x in ("system", "world"):
	if x.startswith(SETPREFIX):
		# A named package set, e.g. "@security".
		s = x[len(SETPREFIX):]
		if s in depgraph_sets.sets:
			depgraph_sets.sets[s] = pset
			args.append(SetArg(arg=x, pset=pset,
				root_config=root_config))
		# Otherwise treat the favorite as a plain atom.
		x = Atom(x, allow_repo=True)
	except portage.exception.InvalidAtom:
	args.append(AtomArg(arg=x, atom=x,
		root_config=root_config))
	self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so the handler can inspect
		# the failed calculation state.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	"""
	# NOTE(review): docstring appears truncated in this excerpt.
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _backtrack_mask(_internal_exception):
	"""
	This is raised by _show_unsatisfied_dep() when it's called with
	check_backtrack=True and a matching package has been masked by
	"""
	# NOTE(review): docstring appears truncated here (presumably
	# "... masked by backtracking") -- confirm against full source.
class _autounmask_breakage(_internal_exception):
	"""
	This is raised by _show_unsatisfied_dep() when it's called with
	check_autounmask_breakage=True and a matching package has
	been disqualified due to autounmask changes.
	"""
def need_restart(self):
	"""
	Report whether the calculation requested a restart that has not
	been suppressed via the _skip_restart flag.
	"""
	dynamic_config = self._dynamic_config
	restart_requested = dynamic_config._need_restart
	restart_suppressed = dynamic_config._skip_restart
	return restart_requested and not restart_suppressed
def success_without_autounmask(self):
	"""Expose the dynamic config's _success_without_autounmask flag."""
	dynamic_config = self._dynamic_config
	return dynamic_config._success_without_autounmask
def autounmask_breakage_detected(self):
	# Replay the recorded unsatisfied-dep displays in
	# check_autounmask_breakage mode; _show_unsatisfied_dep raises
	# _autounmask_breakage when a match was disqualified by
	# autounmask changes.
	# NOTE(review): the try: opener and the return statements of this
	# method are not visible in this truncated excerpt.
		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(
				*pargs, check_autounmask_breakage=True, **kwargs)
		except self._autounmask_breakage:
def get_backtrack_infos(self):
	"""Expose the dynamic config's accumulated _backtrack_infos."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
	# NOTE(review): this excerpt appears truncated -- docstring
	# delimiters, several return/continue/else lines, the
	# `self._root = root` assignment and some try: openers are not
	# visible below.
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	def __init__(self, depgraph, root):
		dbapi.__init__(self)
		self._depgraph = depgraph
		# Caches keyed by atom and by cpv, invalidated via _clear_cache().
		self._match_cache = {}
		self._cpv_pkg_map = {}
	def _clear_cache(self):
		# Drop both caches; called when graph state changes invalidate
		# previous match results.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()
	def cp_list(self, cp):
		Emulate cp_list just so it can be used to check for existence
		of new-style virtuals. Since it's a waste of time to return
		more than one cpv for this use case, a maximum of one cpv will
		if isinstance(cp, Atom):
		for pkg in self._depgraph._iter_match_pkgs_any(
			self._depgraph._frozen_config.roots[self._root], atom):
	def match(self, atom):
		# Return matching cpvs for atom, memoized in _match_cache.
		ret = self._match_cache.get(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		if pkg is not None and self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		if pkg is not None and \
			atom.slot is None and \
			pkg.cp.startswith("virtual/") and \
			(("remove" not in self._depgraph._dynamic_config.myparams and
			"--update" not in self._depgraph._frozen_config.myopts) or
			not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
			# For new-style virtual lookahead that occurs inside dep_check()
			# for bug #141118, examine all slots. This is needed so that newer
			# slots will not unnecessarily be pulled in when a satisfying lower
			# slot is already installed. For example, if virtual/jdk-1.5 is
			# satisfied via gcj-jdk then there's no need to pull in a newer
			# slot to satisfy a virtual/jdk dependency, unless --update is
			for virt_pkg in self._depgraph._iter_match_pkgs_any(
				self._depgraph._frozen_config.roots[self._root], atom):
				if virt_pkg.cp != pkg.cp:
				slots.add(virt_pkg.slot)
			slots.remove(pkg.slot)
			slot_atom = atom.with_slot(slots.pop())
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[atom] = ret
	def _visible(self, pkg):
		# Decide whether pkg should be exposed through this view.
		if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
			arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
			except (StopIteration, portage.exception.InvalidDependString):
		if pkg.installed and \
			(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
			# Account for packages with masks (like KEYWORDS masks)
			# that are usually ignored in visibility checks for
			# installed packages, in order to handle cases like
			myopts = self._depgraph._frozen_config.myopts
			use_ebuild_visibility = myopts.get(
				'--use-ebuild-visibility', 'n') != 'n'
			avoid_update = "--update" not in myopts and \
				"remove" not in self._depgraph._dynamic_config.myparams
			usepkgonly = "--usepkgonly" in myopts
			if not avoid_update:
				if not use_ebuild_visibility and usepkgonly:
					pkg_eb = self._depgraph._pkg(
						pkg.cpv, "ebuild", pkg.root_config,
					except portage.exception.PackageNotFound:
						pkg_eb_visible = False
						for pkg_eb in self._depgraph._iter_match_pkgs(
							pkg.root_config, "ebuild",
							Atom("=%s" % (pkg.cpv,))):
							if self._depgraph._pkg_visibility_check(pkg_eb):
								pkg_eb_visible = True
						if not pkg_eb_visible:
					if not self._depgraph._pkg_visibility_check(pkg_eb):
		in_graph = self._depgraph._dynamic_config._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			# Note: highest_visible is not necessarily the real highest
			# visible, especially when --update is not enabled, so use
			# < operator instead of !=.
			if pkg < highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.
	def aux_get(self, cpv, wants):
		# Serve metadata from packages previously returned by match().
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	# Report that a short (category-less) ebuild name matched several
	# packages, listing the fully-qualified alternatives.
	# NOTE(review): excerpt appears truncated -- the early return after
	# the --quiet branch and parts of the search-output loop are missing.
	if "--quiet" in myopts:
		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
	# Non-quiet mode: use the search machinery to show richer output.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def _spinner_start(spinner, myopts):
	# Print the pre-calculation banner and start the progress spinner,
	# unless options make either inappropriate.
	# NOTE(review): excerpt appears truncated -- the early return for a
	# None spinner, the `action` assignments and some else: lines are
	# not visible here.
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:
		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			if "--unordered-display" in myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s:" % action) + "\n\n")
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s, in reverse order:" % action) + "\n\n")
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be %s, in order:" % action) + "\n\n")
	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		spinner.update = spinner.update_quiet
		portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
	# Stop the spinner and finish the "Calculating dependencies" line.
	# NOTE(review): the return statement of the guard clause is not
	# visible in this truncated excerpt.
	if spinner is None or \
		spinner.update == spinner.update_quiet:
	if spinner.update != spinner.update_basic:
		# update_basic is used for non-tty output,
		# so don't output backspaces in that case.
		portage.writemsg_stdout("\b\b")
	portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	# Public wrapper: run _backtrack_depgraph() with spinner start/stop.
	# NOTE(review): the docstring delimiters and the try:/finally: lines
	# are not visible in this truncated excerpt.
	Raises PackageSetNotFound if myfiles contains a missing package set.
	_spinner_start(spinner, myopts)
	return _backtrack_depgraph(settings, trees, myopts, myparams,
		myaction, myfiles, spinner)
	_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
	# Repeatedly build a depgraph, feeding failure information back to
	# the Backtracker, until it succeeds or the retry budget runs out.
	# NOTE(review): excerpt appears truncated -- the while loop header,
	# break statements, backtracked counter updates and the writemsg_level
	# call openers are not visible here.
	debug = "--debug" in myopts
	max_retries = myopts.get('--backtrack', 10)
	# NOTE(review): "/ 2" yields a float under Python 3 true division;
	# confirm whether integer division ("// 2") is intended here.
	max_depth = max(1, (max_retries + 1) / 2)
	allow_backtracking = max_retries > 0
	backtracker = Backtracker(max_depth)
	frozen_config = _frozen_depgraph_config(settings, trees,
	if debug and mydepgraph is not None:
		"\n\nbacktracking try %s \n\n" % \
		backtracked, noiselevel=-1, level=logging.DEBUG)
		mydepgraph.display_problems()
	backtrack_parameters = backtracker.get()
	mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
		frozen_config=frozen_config,
		allow_backtracking=allow_backtracking,
		backtrack_parameters=backtrack_parameters)
	success, favorites = mydepgraph.select_files(myfiles)
	if success or mydepgraph.success_without_autounmask():
	elif not allow_backtracking:
	elif backtracked >= max_retries:
	elif mydepgraph.need_restart():
		backtracker.feedback(mydepgraph.get_backtrack_infos())
	# If backtracking never produced a success, redo the last run with
	# backtracking disabled so the failure can be reported cleanly.
	if not (success or mydepgraph.success_without_autounmask()) and backtracked:
		"\n\nbacktracking aborted after %s tries\n\n" % \
		backtracked, noiselevel=-1, level=logging.DEBUG)
		mydepgraph.display_problems()
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=False,
			backtrack_parameters=backtracker.get_best_run())
		success, favorites = mydepgraph.select_files(myfiles)
	# If autounmask changes themselves caused breakage, retry once with
	# autounmask disabled.
	if not success and mydepgraph.autounmask_breakage_detected():
		"\n\nautounmask breakage detected\n\n",
		noiselevel=-1, level=logging.DEBUG)
		mydepgraph.display_problems()
		myopts["--autounmask"] = "n"
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config, allow_backtracking=False)
		success, favorites = mydepgraph.select_files(myfiles)
	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Wrap _resume_depgraph with spinner start/stop handling.

	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	_spinner_start(spinner, myopts)
	try:
		return _resume_depgraph(settings, trees, mtimedb, myopts,
			myparams, spinner)
	finally:
		# Always terminate the spinner, even on error.
		_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.
	@rtype: tuple
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner, frozen_config=frozen_config)
		try:
			success = mydepgraph._loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep as e:
			if not skip_unsatisfied:
				raise

			graph = mydepgraph._dynamic_config.digraph
			# Seed the unsatisfied set with the direct parents of the
			# unsatisfied dependencies.
			unsatisfied_parents = {}
			for dep in e.value:
				unsatisfied_parents[dep.parent] = dep.parent
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				node = unsatisfied_stack.pop()
				if node in traversed_nodes:
					continue
				traversed_nodes.add(node)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(node):
					if isinstance(parent_node, Package) and \
						parent_node.operation in ("merge", "nomerge"):
						# We need to traverse all priorities here, in
						# order to ensure that a package with an
						# unsatisfied dependency won't get pulled in,
						# even indirectly via a soft dependency.
						unsatisfied_parents[parent_node] = parent_node
						unsatisfied_stack.append(parent_node)

			unsatisfied_tuples = frozenset(tuple(parent_node)
				for parent_node in unsatisfied_parents
				if isinstance(parent_node, Package))
			# Keep only list-type mergelist entries that are not among
			# the unsatisfied packages.
			pruned_mergelist = [entry for entry in mergelist
				if isinstance(entry, list) and
				tuple(entry) not in unsatisfied_tuples]

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the
			# graph due to failure to build/install runtime dependencies
			# after the dependent package has already been installed.
			dropped_tasks.update(node for node in \
				unsatisfied_parents if node.operation != "nomerge")

			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
	"""
	Fetch metadata for cpv from db and compute its masking reasons.

	@rtype: tuple
	@returns: (metadata, mreasons) where metadata is None when the
		package entry is unreadable and mreasons is a list of strings
	"""
	try:
		metadata = dict(zip(db_keys,
			db.aux_get(cpv, db_keys, myrepo=myrepo)))
	except KeyError:
		# aux_get failed; the entry is missing or unreadable.
		metadata = None

	if metadata is None:
		mreasons = ["corruption"]
	else:
		eapi = metadata['EAPI']
		if eapi[:1] == '-':
			# Strip the leading '-' marker from an unsupported EAPI.
			eapi = eapi[1:]
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
		else:
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)

			modified_use = None
			if _pkg_use_enabled is not None:
				modified_use = _pkg_use_enabled(pkg)

			mreasons = get_masking_status(pkg, pkgsettings, root_config,
				myrepo=myrepo, use=modified_use)

	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""
	Display each masked package with its mask reasons, any package.mask
	comment, and the location of any missing licenses.

	@param masked_packages: iterable of (root_config, pkgsettings, cpv,
		repo, metadata, mreasons) tuples
	@rtype: bool
	@returns: True if at least one package was masked due to an
		unsupported EAPI, False otherwise
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv, repo,
		metadata, mreasons) in masked_packages:
		output_cpv = cpv
		if repo:
			output_cpv += _repo_separator + repo
		if output_cpv in shown_cpvs:
			continue
		shown_cpvs.add(output_cpv)
		eapi_masked = metadata is not None and \
			not portage.eapi_is_supported(metadata["EAPI"])
		if eapi_masked:
			have_eapi_mask = True
			# When masked by EAPI, metadata is mostly useless since
			# it doesn't contain essential things like SLOT.
			metadata = None

		comment, filename = None, None
		if not eapi_masked and \
			"package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not eapi_masked and metadata is not None:
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)

		if comment and comment not in shown_comments:
			writemsg_stdout(filename + ":\n" + comment + "\n",
				noiselevel=-1)
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			if l in shown_licenses:
				continue
			# FIX: look up the license path only for licenses that
			# have not been shown yet, instead of before the dedup
			# check (avoids a wasted dbapi lookup per duplicate).
			l_path = portdb.findLicensePath(l)
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg_stdout(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Point the user at documentation describing package masking."""
	lines = (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for line in lines:
		writemsg_stdout(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a pointer to the Gentoo Handbook section on blocked packages."""
	lines = (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for line in lines:
		writemsg(line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg as a list."""
	reasons = _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use)
	return [reason.message for reason in reasons]
7090 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7091 mreasons = _getmaskingstatus(
7092 pkg, settings=pkgsettings,
7093 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7095 if not pkg.installed:
7096 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
7097 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7098 pkg.metadata["CHOST"]))
7101 for msg_type, msgs in pkg.invalid.items():
7104 _MaskReason("invalid", "invalid: %s" % (msg,)))
7106 if not pkg.metadata["SLOT"]:
7108 _MaskReason("invalid", "SLOT: undefined"))