1 # Copyright 1999-2013 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function, unicode_literals
12 from collections import deque
13 from itertools import chain
16 from portage import os, OrderedDict
17 from portage import _unicode_decode, _unicode_encode, _encodings
18 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
19 from portage.dbapi import dbapi
20 from portage.dbapi.dep_expand import dep_expand
21 from portage.dbapi._similar_name_search import similar_name_search
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.dep._slot_operator import ignore_built_slot_operator_deps
26 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
28 from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
29 PackageNotFound, PortageException)
30 from portage.output import colorize, create_color_func, \
32 bad = create_color_func("BAD")
33 from portage.package.ebuild.config import _get_feature_flags
34 from portage.package.ebuild.getmaskingstatus import \
35 _getmaskingstatus, _MaskReason
36 from portage._sets import SETPREFIX
37 from portage._sets.base import InternalPackageSet
38 from portage.util import ConfigProtect, shlex_split, new_protect_filename
39 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
40 from portage.util import ensure_dirs
41 from portage.util import writemsg_level, write_atomic
42 from portage.util.digraph import digraph
43 from portage.util.listdir import _ignorecvs_dirs
44 from portage.util._async.TaskScheduler import TaskScheduler
45 from portage.versions import catpkgsplit
47 from _emerge.AtomArg import AtomArg
48 from _emerge.Blocker import Blocker
49 from _emerge.BlockerCache import BlockerCache
50 from _emerge.BlockerDepPriority import BlockerDepPriority
51 from _emerge.countdown import countdown
52 from _emerge.create_world_atom import create_world_atom
53 from _emerge.Dependency import Dependency
54 from _emerge.DependencyArg import DependencyArg
55 from _emerge.DepPriority import DepPriority
56 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
57 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
58 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
59 from _emerge.FakeVartree import FakeVartree
60 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
61 from _emerge.is_valid_package_atom import insert_category_into_atom, \
63 from _emerge.Package import Package
64 from _emerge.PackageArg import PackageArg
65 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
66 from _emerge.RootConfig import RootConfig
67 from _emerge.search import search
68 from _emerge.SetArg import SetArg
69 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
70 from _emerge.UnmergeDepPriority import UnmergeDepPriority
71 from _emerge.UseFlagDisplay import pkg_use_display
72 from _emerge.userquery import userquery
74 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
75 from _emerge.resolver.slot_collision import slot_conflict_handler
76 from _emerge.resolver.circular_dependency import circular_dependency_handler
77 from _emerge.resolver.output import Display
79 if sys.hexversion >= 0x3000000:
86 class _scheduler_graph_config(object):
87 def __init__(self, trees, pkg_cache, graph, mergelist):
89 self.pkg_cache = pkg_cache
91 self.mergelist = mergelist
93 def _wildcard_set(atoms):
94 pkgs = InternalPackageSet(allow_wildcard=True)
97 x = Atom(x, allow_wildcard=True, allow_repo=False)
98 except portage.exception.InvalidAtom:
99 x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
103 class _frozen_depgraph_config(object):
105 def __init__(self, settings, trees, myopts, spinner):
106 self.settings = settings
107 self.target_root = settings["EROOT"]
110 if settings.get("PORTAGE_DEBUG", "") == "1":
112 self.spinner = spinner
113 self._running_root = trees[trees._running_eroot]["root_config"]
114 self.pkgsettings = {}
116 self._trees_orig = trees
118 # All Package instances
120 self._highest_license_masked = {}
121 dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
122 ignore_built_slot_operator_deps = myopts.get(
123 "--ignore-built-slot-operator-deps", "n") == "y"
125 self.trees[myroot] = {}
126 # Create a RootConfig instance that references
127 # the FakeVartree instead of the real one.
128 self.roots[myroot] = RootConfig(
129 trees[myroot]["vartree"].settings,
131 trees[myroot]["root_config"].setconfig)
132 for tree in ("porttree", "bintree"):
133 self.trees[myroot][tree] = trees[myroot][tree]
134 self.trees[myroot]["vartree"] = \
135 FakeVartree(trees[myroot]["root_config"],
136 pkg_cache=self._pkg_cache,
137 pkg_root_config=self.roots[myroot],
138 dynamic_deps=dynamic_deps,
139 ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
140 self.pkgsettings[myroot] = portage.config(
141 clone=self.trees[myroot]["vartree"].settings)
143 self._required_set_names = set(["world"])
145 atoms = ' '.join(myopts.get("--exclude", [])).split()
146 self.excluded_pkgs = _wildcard_set(atoms)
147 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
148 self.reinstall_atoms = _wildcard_set(atoms)
149 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
150 self.usepkg_exclude = _wildcard_set(atoms)
151 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
152 self.useoldpkg_atoms = _wildcard_set(atoms)
153 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
154 self.rebuild_exclude = _wildcard_set(atoms)
155 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
156 self.rebuild_ignore = _wildcard_set(atoms)
158 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
159 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
160 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
162 class _depgraph_sets(object):
164 # contains all sets added to the graph
166 # contains non-set atoms given as arguments
167 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
168 # contains all atoms from all sets added to the graph, including
169 # atoms given as arguments
170 self.atoms = InternalPackageSet(allow_repo=True)
171 self.atom_arg_map = {}
173 class _rebuild_config(object):
def __init__(self, frozen_config, backtrack_parameters):
	"""Capture the rebuild-related state needed for one graph
	calculation from the frozen config and backtracker parameters."""
	self._graph = digraph()
	self._frozen_config = frozen_config
	# Copy the backtracker's sets so this run can grow them without
	# mutating the caller's parameters.
	self.rebuild_list = backtrack_parameters.rebuild_list.copy()
	self.orig_rebuild_list = self.rebuild_list.copy()
	self.reinstall_list = backtrack_parameters.reinstall_list.copy()
	# Mirror the user's --rebuild-if-* options from the frozen config.
	self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
	self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
	self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
	# True when any rebuild trigger is active at all.
	self.rebuild = any((self.rebuild_if_new_rev,
		self.rebuild_if_new_ver, self.rebuild_if_unbuilt))
def add(self, dep_pkg, dep):
	"""Record a build-time dependency edge (dep_pkg -> built parent)
	in the rebuild graph, honoring the user's exclude/ignore atoms."""
	if not self.rebuild:
		return
	parent = dep.collapsed_parent
	priority = dep.collapsed_priority
	# Only edges from a real package to a built parent via a
	# build-time dependency are relevant for rebuild decisions.
	if not (isinstance(parent, Package) and isinstance(dep_pkg, Package)):
		return
	if not (parent.built and priority.buildtime):
		return
	# Respect --rebuild-exclude for the parent and --rebuild-ignore
	# for the dependency.
	if self._frozen_config.rebuild_exclude.findAtomForPackage(parent):
		return
	if self._frozen_config.rebuild_ignore.findAtomForPackage(dep_pkg):
		return
	self._graph.add(dep_pkg, parent, priority)
198 def _needs_rebuild(self, dep_pkg):
199 """Check whether packages that depend on dep_pkg need to be rebuilt."""
200 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
201 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
204 if self.rebuild_if_unbuilt:
205 # dep_pkg is being installed from source, so binary
206 # packages for parents are invalid. Force rebuild
209 trees = self._frozen_config.trees
210 vardb = trees[dep_pkg.root]["vartree"].dbapi
211 if self.rebuild_if_new_rev:
212 # Parent packages are valid if a package with the same
213 # cpv is already installed.
214 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
216 # Otherwise, parent packages are valid if a package with the same
217 # version (excluding revision) is already installed.
218 assert self.rebuild_if_new_ver
219 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
220 for inst_cpv in vardb.match(dep_pkg.slot_atom):
221 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
222 if inst_cpv_norev == cpv_norev:
227 def _trigger_rebuild(self, parent, build_deps):
228 root_slot = (parent.root, parent.slot_atom)
229 if root_slot in self.rebuild_list:
231 trees = self._frozen_config.trees
233 for slot_atom, dep_pkg in build_deps.items():
234 dep_root_slot = (dep_pkg.root, slot_atom)
235 if self._needs_rebuild(dep_pkg):
236 self.rebuild_list.add(root_slot)
238 elif ("--usepkg" in self._frozen_config.myopts and
239 (dep_root_slot in self.reinstall_list or
240 dep_root_slot in self.rebuild_list or
241 not dep_pkg.installed)):
243 # A direct rebuild dependency is being installed. We
244 # should update the parent as well to the latest binary,
245 # if that binary is valid.
247 # To validate the binary, we check whether all of the
248 # rebuild dependencies are present on the same binhost.
250 # 1) If parent is present on the binhost, but one of its
251 # rebuild dependencies is not, then the parent should
252 # be rebuilt from source.
253 # 2) Otherwise, the parent binary is assumed to be valid,
254 # because all of its rebuild dependencies are
256 bintree = trees[parent.root]["bintree"]
257 uri = bintree.get_pkgindex_uri(parent.cpv)
258 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
259 bindb = bintree.dbapi
260 if self.rebuild_if_new_ver and uri and uri != dep_uri:
261 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
262 for cpv in bindb.match(dep_pkg.slot_atom):
263 if cpv_norev == catpkgsplit(cpv)[:-1]:
264 dep_uri = bintree.get_pkgindex_uri(cpv)
267 if uri and uri != dep_uri:
268 # 1) Remote binary package is invalid because it was
269 # built without dep_pkg. Force rebuild.
270 self.rebuild_list.add(root_slot)
272 elif (parent.installed and
273 root_slot not in self.reinstall_list):
275 bin_build_time, = bindb.aux_get(parent.cpv,
279 if bin_build_time != _unicode(parent.build_time):
280 # 2) Remote binary package is valid, and local package
281 # is not up to date. Force reinstall.
284 self.reinstall_list.add(root_slot)
287 def trigger_rebuilds(self):
289 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
290 depends on pkgA at both build-time and run-time, pkgB needs to be
297 leaf_nodes = deque(graph.leaf_nodes())
299 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
300 # will always know which children are being rebuilt.
303 # We'll have to drop an edge. This should be quite rare.
304 leaf_nodes.append(graph.order[-1])
306 node = leaf_nodes.popleft()
307 if node not in graph:
308 # This can be triggered by circular dependencies.
310 slot_atom = node.slot_atom
312 # Remove our leaf node from the graph, keeping track of deps.
313 parents = graph.parent_nodes(node)
315 node_build_deps = build_deps.get(node, {})
316 for parent in parents:
318 # Ignore a direct cycle.
320 parent_bdeps = build_deps.setdefault(parent, {})
321 parent_bdeps[slot_atom] = node
322 if not graph.child_nodes(parent):
323 leaf_nodes.append(parent)
325 # Trigger rebuilds for our leaf node. Because all of our children
326 # have been processed, the build_deps will be completely filled in,
327 # and self.rebuild_list / self.reinstall_list will tell us whether
328 # any of our children need to be rebuilt or reinstalled.
329 if self._trigger_rebuild(node, node_build_deps):
335 class _dynamic_depgraph_config(object):
337 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
338 self.myparams = myparams.copy()
339 self._vdb_loaded = False
340 self._allow_backtracking = allow_backtracking
341 # Maps slot atom to package for each Package added to the graph.
342 self._slot_pkg_map = {}
343 # Maps nodes to the reasons they were selected for reinstallation.
344 self._reinstall_nodes = {}
346 # Contains a filtered view of preferred packages that are selected
347 # from available repositories.
348 self._filtered_trees = {}
349 # Contains installed packages and new packages that have been added
351 self._graph_trees = {}
352 # Caches visible packages returned from _select_package, for use in
353 # depgraph._iter_atoms_for_pkg() SLOT logic.
354 self._visible_pkgs = {}
355 #contains the args created by select_files
356 self._initial_arg_list = []
357 self.digraph = portage.digraph()
358 # manages sets added to the graph
360 # contains all nodes pulled in by self.sets
361 self._set_nodes = set()
362 # Contains only Blocker -> Uninstall edges
363 self._blocker_uninstalls = digraph()
364 # Contains only Package -> Blocker edges
365 self._blocker_parents = digraph()
366 # Contains only irrelevant Package -> Blocker edges
367 self._irrelevant_blockers = digraph()
368 # Contains only unsolvable Package -> Blocker edges
369 self._unsolvable_blockers = digraph()
370 # Contains all Blocker -> Blocked Package edges
371 self._blocked_pkgs = digraph()
372 # Contains world packages that have been protected from
373 # uninstallation but may not have been added to the graph
374 # if the graph is not complete yet.
375 self._blocked_world_pkgs = {}
376 # Contains packages whose dependencies have been traversed.
377 # This is used to check if we have accounted for blockers
378 # relevant to a package.
379 self._traversed_pkg_deps = set()
380 # This should be ordered such that the backtracker will
381 # attempt to solve conflicts which occurred earlier first,
382 # since an earlier conflict can be the cause of a conflict
383 # which occurs later.
384 self._slot_collision_info = OrderedDict()
385 # Slot collision nodes are not allowed to block other packages since
386 # blocker validation is only able to account for one package per slot.
387 self._slot_collision_nodes = set()
388 self._parent_atoms = {}
389 self._slot_conflict_handler = None
390 self._circular_dependency_handler = None
391 self._serialized_tasks_cache = None
392 self._scheduler_graph = None
393 self._displayed_list = None
394 self._pprovided_args = []
395 self._missing_args = []
396 self._masked_installed = set()
397 self._masked_license_updates = set()
398 self._unsatisfied_deps_for_display = []
399 self._unsatisfied_blockers_for_display = None
400 self._circular_deps_for_display = None
402 self._dep_disjunctive_stack = []
403 self._unsatisfied_deps = []
404 self._initially_unsatisfied_deps = []
405 self._ignored_deps = []
406 self._highest_pkg_cache = {}
408 # Binary packages that have been rejected because their USE
409 # didn't match the user's config. It maps packages to a set
410 # of flags causing the rejection.
411 self.ignored_binaries = {}
413 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
414 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
415 self._needed_license_changes = backtrack_parameters.needed_license_changes
416 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
417 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
418 self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
419 self._prune_rebuilds = backtrack_parameters.prune_rebuilds
420 self._need_restart = False
421 # For conditions that always require user intervention, such as
422 # unsatisfied REQUIRED_USE (currently has no autounmask support).
423 self._skip_restart = False
424 self._backtrack_infos = {}
426 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
427 self._success_without_autounmask = False
428 self._traverse_ignored_deps = False
429 self._complete_mode = False
430 self._slot_operator_deps = {}
432 for myroot in depgraph._frozen_config.trees:
433 self.sets[myroot] = _depgraph_sets()
434 self._slot_pkg_map[myroot] = {}
435 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
436 # This dbapi instance will model the state that the vdb will
437 # have after new packages have been installed.
438 fakedb = PackageVirtualDbapi(vardb.settings)
440 self.mydbapi[myroot] = fakedb
443 graph_tree.dbapi = fakedb
444 self._graph_trees[myroot] = {}
445 self._filtered_trees[myroot] = {}
446 # Substitute the graph tree for the vartree in dep_check() since we
447 # want atom selections to be consistent with package selections
448 # that have already been made.
449 self._graph_trees[myroot]["porttree"] = graph_tree
450 self._graph_trees[myroot]["vartree"] = graph_tree
451 self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
452 self._graph_trees[myroot]["graph"] = self.digraph
455 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
456 self._filtered_trees[myroot]["porttree"] = filtered_tree
457 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
459 # Passing in graph_tree as the vartree here could lead to better
460 # atom selections in some cases by causing atoms for packages that
461 # have been added to the graph to be preferred over other choices.
462 # However, it can trigger atom selections that result in
463 # unresolvable direct circular dependencies. For example, this
464 # happens with gwydion-dylan which depends on either itself or
465 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
466 # gwydion-dylan-bin needs to be selected in order to avoid
467 # an unresolvable direct circular dependency.
469 # To solve the problem described above, pass in "graph_db" so that
470 # packages that have been added to the graph are distinguishable
471 # from other available packages and installed packages. Also, pass
472 # the parent package into self._select_atoms() calls so that
473 # unresolvable direct circular dependencies can be detected and
474 # avoided when possible.
475 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
476 self._filtered_trees[myroot]["graph"] = self.digraph
477 self._filtered_trees[myroot]["vartree"] = \
478 depgraph._frozen_config.trees[myroot]["vartree"]
481 # (db, pkg_type, built, installed, db_keys)
482 if "remove" in self.myparams:
483 # For removal operations, use _dep_check_composite_db
484 # for availability and visibility checks. This provides
485 # consistency with install operations, so we don't
486 # get install/uninstall cycles like in bug #332719.
487 self._graph_trees[myroot]["porttree"] = filtered_tree
489 if "--usepkgonly" not in depgraph._frozen_config.myopts:
490 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
491 db_keys = list(portdb._aux_cache_keys)
492 dbs.append((portdb, "ebuild", False, False, db_keys))
494 if "--usepkg" in depgraph._frozen_config.myopts:
495 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
496 db_keys = list(bindb._aux_cache_keys)
497 dbs.append((bindb, "binary", True, False, db_keys))
499 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
500 db_keys = list(depgraph._frozen_config._trees_orig[myroot
501 ]["vartree"].dbapi._aux_cache_keys)
502 dbs.append((vardb, "installed", True, True, db_keys))
503 self._filtered_trees[myroot]["dbs"] = dbs
505 class depgraph(object):
507 pkg_tree_map = RootConfig.pkg_tree_map
509 def __init__(self, settings, trees, myopts, myparams, spinner,
510 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
511 if frozen_config is None:
512 frozen_config = _frozen_depgraph_config(settings, trees,
514 self._frozen_config = frozen_config
515 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
516 allow_backtracking, backtrack_parameters)
517 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
519 self._select_atoms = self._select_atoms_highest_available
520 self._select_package = self._select_pkg_highest_available
524 Load installed package metadata if appropriate. This used to be called
525 from the constructor, but that wasn't very nice since this procedure
526 is slow and it generates spinner output. So, now it's called on-demand
527 by various methods when necessary.
530 if self._dynamic_config._vdb_loaded:
533 for myroot in self._frozen_config.trees:
535 dynamic_deps = self._dynamic_config.myparams.get(
536 "dynamic_deps", "y") != "n"
537 preload_installed_pkgs = \
538 "--nodeps" not in self._frozen_config.myopts
540 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
541 if not fake_vartree.dbapi:
542 # This needs to be called for the first depgraph, but not for
543 # backtracking depgraphs that share the same frozen_config.
546 # FakeVartree.sync() populates virtuals, and we want
547 # self.pkgsettings to have them populated too.
548 self._frozen_config.pkgsettings[myroot] = \
549 portage.config(clone=fake_vartree.settings)
551 if preload_installed_pkgs:
552 vardb = fake_vartree.dbapi
553 fakedb = self._dynamic_config._graph_trees[
554 myroot]["vartree"].dbapi
558 fakedb.cpv_inject(pkg)
560 max_jobs = self._frozen_config.myopts.get("--jobs")
561 max_load = self._frozen_config.myopts.get("--load-average")
562 scheduler = TaskScheduler(
563 self._dynamic_deps_preload(fake_vartree, fakedb),
566 event_loop=fake_vartree._portdb._event_loop)
570 self._dynamic_config._vdb_loaded = True
572 def _dynamic_deps_preload(self, fake_vartree, fakedb):
573 portdb = fake_vartree._portdb
574 for pkg in fake_vartree.dbapi:
575 self._spinner_update()
576 fakedb.cpv_inject(pkg)
577 ebuild_path, repo_path = \
578 portdb.findname2(pkg.cpv, myrepo=pkg.repo)
579 if ebuild_path is None:
580 fake_vartree.dynamic_deps_preload(pkg, None)
582 metadata, ebuild_hash = portdb._pull_valid_cache(
583 pkg.cpv, ebuild_path, repo_path)
584 if metadata is not None:
585 fake_vartree.dynamic_deps_preload(pkg, metadata)
587 proc = EbuildMetadataPhase(cpv=pkg.cpv,
588 ebuild_hash=ebuild_hash,
589 portdb=portdb, repo_path=repo_path,
590 settings=portdb.doebuild_settings)
591 proc.addExitListener(
592 self._dynamic_deps_proc_exit(pkg, fake_vartree))
595 class _dynamic_deps_proc_exit(object):
597 __slots__ = ('_pkg', '_fake_vartree')
599 def __init__(self, pkg, fake_vartree):
601 self._fake_vartree = fake_vartree
603 def __call__(self, proc):
605 if proc.returncode == os.EX_OK:
606 metadata = proc.metadata
607 self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
	"""Advance the progress spinner, if one was configured."""
	spinner = self._frozen_config.spinner
	if spinner:
		spinner.update()
613 def _show_ignored_binaries(self):
615 Show binaries that have been ignored because their USE didn't
616 match the user's config.
618 if not self._dynamic_config.ignored_binaries \
619 or '--quiet' in self._frozen_config.myopts \
620 or self._dynamic_config.myparams.get(
621 "binpkg_respect_use") in ("y", "n"):
624 for pkg in list(self._dynamic_config.ignored_binaries):
626 selected_pkg = self._dynamic_config.mydbapi[pkg.root
627 ].match_pkgs(pkg.slot_atom)
632 selected_pkg = selected_pkg[-1]
633 if selected_pkg > pkg:
634 self._dynamic_config.ignored_binaries.pop(pkg)
637 if selected_pkg.installed and \
638 selected_pkg.cpv == pkg.cpv and \
639 selected_pkg.build_time == pkg.build_time:
640 # We don't care about ignored binaries when an
641 # identical installed instance is selected to
643 self._dynamic_config.ignored_binaries.pop(pkg)
646 if not self._dynamic_config.ignored_binaries:
649 self._show_merge_list()
651 writemsg("\n!!! The following binary packages have been ignored " + \
652 "due to non matching USE:\n\n", noiselevel=-1)
654 for pkg, flags in self._dynamic_config.ignored_binaries.items():
656 for flag in sorted(flags):
657 if flag not in pkg.use.enabled:
659 flag_display.append(flag)
660 flag_display = " ".join(flag_display)
661 # The user can paste this line into package.use
662 writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
663 if pkg.root_config.settings["ROOT"] != "/":
664 writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
665 writemsg("\n", noiselevel=-1)
669 "NOTE: The --binpkg-respect-use=n option will prevent emerge",
670 " from ignoring these binary packages if possible.",
671 " Using --binpkg-respect-use=y will silence this warning."
676 line = colorize("INFORM", line)
677 writemsg(line + "\n", noiselevel=-1)
679 def _get_missed_updates(self):
681 # In order to minimize noise, show only the highest
682 # missed update from each SLOT.
684 for pkg, mask_reasons in \
685 self._dynamic_config._runtime_pkg_mask.items():
687 # Exclude installed here since we only
688 # want to show available updates.
690 chosen_pkg = self._dynamic_config.mydbapi[pkg.root
691 ].match_pkgs(pkg.slot_atom)
692 if not chosen_pkg or chosen_pkg[-1] >= pkg:
694 k = (pkg.root, pkg.slot_atom)
695 if k in missed_updates:
696 other_pkg, mask_type, parent_atoms = missed_updates[k]
699 for mask_type, parent_atoms in mask_reasons.items():
702 missed_updates[k] = (pkg, mask_type, parent_atoms)
705 return missed_updates
707 def _show_missed_update(self):
709 missed_updates = self._get_missed_updates()
711 if not missed_updates:
714 missed_update_types = {}
715 for pkg, mask_type, parent_atoms in missed_updates.values():
716 missed_update_types.setdefault(mask_type,
717 []).append((pkg, parent_atoms))
719 if '--quiet' in self._frozen_config.myopts and \
720 '--debug' not in self._frozen_config.myopts:
721 missed_update_types.pop("slot conflict", None)
722 missed_update_types.pop("missing dependency", None)
724 self._show_missed_update_slot_conflicts(
725 missed_update_types.get("slot conflict"))
727 self._show_missed_update_unsatisfied_dep(
728 missed_update_types.get("missing dependency"))
730 def _show_missed_update_unsatisfied_dep(self, missed_updates):
732 if not missed_updates:
735 self._show_merge_list()
736 backtrack_masked = []
738 for pkg, parent_atoms in missed_updates:
741 for parent, root, atom in parent_atoms:
742 self._show_unsatisfied_dep(root, atom, myparent=parent,
743 check_backtrack=True)
744 except self._backtrack_mask:
745 # This is displayed below in abbreviated form.
746 backtrack_masked.append((pkg, parent_atoms))
749 writemsg("\n!!! The following update has been skipped " + \
750 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
752 writemsg(str(pkg.slot_atom), noiselevel=-1)
753 if pkg.root_config.settings["ROOT"] != "/":
754 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
755 writemsg("\n", noiselevel=-1)
757 for parent, root, atom in parent_atoms:
758 self._show_unsatisfied_dep(root, atom, myparent=parent)
759 writemsg("\n", noiselevel=-1)
762 # These are shown in abbreviated form, in order to avoid terminal
763 # flooding from mask messages as reported in bug #285832.
764 writemsg("\n!!! The following update(s) have been skipped " + \
765 "due to unsatisfied dependencies\n" + \
766 "!!! triggered by backtracking:\n\n", noiselevel=-1)
767 for pkg, parent_atoms in backtrack_masked:
768 writemsg(str(pkg.slot_atom), noiselevel=-1)
769 if pkg.root_config.settings["ROOT"] != "/":
770 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
771 writemsg("\n", noiselevel=-1)
773 def _show_missed_update_slot_conflicts(self, missed_updates):
775 if not missed_updates:
778 self._show_merge_list()
780 msg.append("\nWARNING: One or more updates have been " + \
781 "skipped due to a dependency conflict:\n\n")
784 for pkg, parent_atoms in missed_updates:
785 msg.append(str(pkg.slot_atom))
786 if pkg.root_config.settings["ROOT"] != "/":
787 msg.append(" for %s" % (pkg.root,))
790 for parent, atom in parent_atoms:
794 msg.append(" conflicts with\n")
796 if isinstance(parent,
797 (PackageArg, AtomArg)):
798 # For PackageArg and AtomArg types, it's
799 # redundant to display the atom attribute.
800 msg.append(str(parent))
802 # Display the specific atom from SetArg or
804 msg.append("%s required by %s" % (atom, parent))
808 writemsg("".join(msg), noiselevel=-1)
810 def _show_slot_collision_notice(self):
811 """Show an informational message advising the user to mask one of the
812 packages. In some cases it may be possible to resolve this
813 automatically, but support for backtracking (removal nodes that have
814 already been selected) will be required in order to handle all possible
818 if not self._dynamic_config._slot_collision_info:
821 self._show_merge_list()
823 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
824 handler = self._dynamic_config._slot_conflict_handler
826 conflict = handler.get_conflict()
827 writemsg(conflict, noiselevel=-1)
829 explanation = handler.get_explanation()
831 writemsg(explanation, noiselevel=-1)
834 if "--quiet" in self._frozen_config.myopts:
838 msg.append("It may be possible to solve this problem ")
839 msg.append("by using package.mask to prevent one of ")
840 msg.append("those packages from being selected. ")
841 msg.append("However, it is also possible that conflicting ")
842 msg.append("dependencies exist such that they are impossible to ")
843 msg.append("satisfy simultaneously. If such a conflict exists in ")
844 msg.append("the dependencies of two different packages, then those ")
845 msg.append("packages can not be installed simultaneously.")
846 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
847 if not self._dynamic_config._allow_backtracking and \
848 (backtrack_opt is None or \
849 (backtrack_opt > 0 and backtrack_opt < 30)):
850 msg.append(" You may want to try a larger value of the ")
851 msg.append("--backtrack option, such as --backtrack=30, ")
852 msg.append("in order to see if that will solve this conflict ")
853 msg.append("automatically.")
855 for line in textwrap.wrap(''.join(msg), 70):
856 writemsg(line + '\n', noiselevel=-1)
857 writemsg('\n', noiselevel=-1)
860 msg.append("For more information, see MASKED PACKAGES ")
861 msg.append("section in the emerge man page or refer ")
862 msg.append("to the Gentoo Handbook.")
863 for line in textwrap.wrap(''.join(msg), 70):
864 writemsg(line + '\n', noiselevel=-1)
865 writemsg('\n', noiselevel=-1)
867 def _process_slot_conflicts(self):
869 If there are any slot conflicts and backtracking is enabled,
870 _complete_graph should complete the graph before this method
871 is called, so that all relevant reverse dependencies are
872 available for use in backtracking decisions.
874 for (slot_atom, root), slot_nodes in \
875 self._dynamic_config._slot_collision_info.items():
876 self._process_slot_conflict(root, slot_atom, slot_nodes)
878 def _process_slot_conflict(self, root, slot_atom, slot_nodes):
# (Docstring delimiters not visible in this excerpt.)
880 Process slot conflict data to identify specific atoms which
881 lead to conflict. These atoms only match a subset of the
882 packages that have been pulled into a given slot.
885 debug = "--debug" in self._frozen_config.myopts
# Union of all parent atoms pulling in any package of this slot.
887 slot_parent_atoms = set()
888 for pkg in slot_nodes:
889 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
# NOTE(review): a guard (likely "if not parent_atoms: continue")
# appears elided between these lines in this excerpt.
892 slot_parent_atoms.update(parent_atoms)
# Second pass: classify which parent atoms fail to match each package.
896 for pkg in slot_nodes:
# If backtracking already masked this package yet it reappears,
# that indicates a backtracking loop; log it in debug mode.
898 if self._dynamic_config._allow_backtracking and \
899 pkg in self._dynamic_config._runtime_pkg_mask:
902 "!!! backtracking loop detected: %s %s\n" % \
904 self._dynamic_config._runtime_pkg_mask[pkg]),
905 level=logging.DEBUG, noiselevel=-1)
907 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
908 if parent_atoms is None:
# NOTE(review): the "parent_atoms = set()" initialization appears
# elided from this excerpt (original line 909).
910 self._dynamic_config._parent_atoms[pkg] = parent_atoms
# Re-check every slot parent atom against this package so that
# conflict_atoms records only the atoms that truly fail to match.
913 for parent_atom in slot_parent_atoms:
914 if parent_atom in parent_atoms:
916 # Use package set for matching since it will match via
917 # PROVIDE when necessary, while match_from_list does not.
918 parent, atom = parent_atom
919 atom_set = InternalPackageSet(
920 initial_atoms=(atom,), allow_repo=True)
921 if atom_set.findAtomForPackage(pkg,
922 modified_use=self._pkg_use_enabled(pkg)):
923 parent_atoms.add(parent_atom)
926 conflict_atoms.setdefault(parent_atom, set()).add(pkg)
929 conflict_pkgs.append(pkg)
# With backtracking enabled and conflicts unacceptable, try the
# slot/sub-slot ABI rebuild path first, then fall back to masking.
931 if conflict_pkgs and \
932 self._dynamic_config._allow_backtracking and \
933 not self._accept_blocker_conflicts():
935 for pkg in conflict_pkgs:
936 if self._slot_conflict_backtrack_abi(pkg,
937 slot_nodes, conflict_atoms):
938 backtrack_infos = self._dynamic_config._backtrack_infos
939 config = backtrack_infos.setdefault("config", {})
940 config.setdefault("slot_conflict_abi", set()).add(pkg)
942 remaining.append(pkg)
944 self._slot_confict_backtrack(root, slot_atom,
945 slot_parent_atoms, remaining)
947 def _slot_confict_backtrack(self, root, slot_atom,
948 all_parents, conflict_pkgs):
# Record backtracking data that masks one of the conflicting packages.
# (Note: "confict" typo is preserved because callers use this name.)
950 debug = "--debug" in self._frozen_config.myopts
951 existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
953 # The ordering of backtrack_data can make
954 # a difference here, because both mask actions may lead
955 # to valid, but different, solutions and the one with
956 # 'existing_node' masked is usually the better one. Because
957 # of that, we choose an order such that
958 # the backtracker will first explore the choice with
959 # existing_node masked. The backtracker reverses the
960 # order, so the order it uses is the reverse of the
961 # order shown here. See bug #339606.
962 if existing_node in conflict_pkgs and \
963 existing_node is not conflict_pkgs[-1]:
964 conflict_pkgs.remove(existing_node)
965 conflict_pkgs.append(existing_node)
966 for to_be_masked in conflict_pkgs:
967 # For missed update messages, find out which
968 # atoms matched to_be_selected that did not
969 # match to_be_masked.
# NOTE(review): the assignment target for this expression (likely
# "parent_atoms =", original line 970) appears elided in this excerpt.
971 self._dynamic_config._parent_atoms.get(to_be_masked, set())
972 conflict_atoms = set(parent_atom for parent_atom in all_parents \
973 if parent_atom not in parent_atoms)
974 backtrack_data.append((to_be_masked, conflict_atoms))
976 if len(backtrack_data) > 1:
977 # NOTE: Generally, we prefer to mask the higher
978 # version since this solves common cases in which a
979 # lower version is needed so that all dependencies
980 # will be satisfied (bug #337178). However, if
981 # existing_node happens to be installed then we
982 # mask that since this is a common case that is
983 # triggered when --update is not enabled.
984 if existing_node.installed:
# NOTE(review): the branch body (likely "pass", original line 985)
# appears elided in this excerpt.
986 elif any(pkg > existing_node for pkg in conflict_pkgs):
987 backtrack_data.reverse()
# The backtracker consumes entries from the end of the list first.
989 to_be_masked = backtrack_data[-1][0]
991 self._dynamic_config._backtrack_infos.setdefault(
992 "slot conflict", []).append(backtrack_data)
993 self._dynamic_config._need_restart = True
# Debug trace of the chosen mask decision.
998 msg.append("backtracking due to slot conflict:")
999 msg.append(" first package: %s" % existing_node)
1000 msg.append(" package to mask: %s" % to_be_masked)
1001 msg.append(" slot: %s" % slot_atom)
1002 msg.append(" parents: %s" % ", ".join( \
1003 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
1005 writemsg_level("".join("%s\n" % l for l in msg),
1006 noiselevel=-1, level=logging.DEBUG)
1008 def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
# (Docstring delimiters not visible in this excerpt.)
1010 If one or more conflict atoms have a slot/sub-slot dep that can be resolved
1011 by rebuilding the parent package, then schedule the rebuild via
1012 backtracking, and return True. Otherwise, return False.
1015 found_update = False
1016 for parent_atom, conflict_pkgs in conflict_atoms.items():
1017 parent, atom = parent_atom
# Only "=" slot-operator deps of built parents are rebuild candidates.
1018 if atom.slot_operator != "=" or not parent.built:
1021 if pkg not in conflict_pkgs:
# Probe every other package in the slot as a potential new child.
1024 for other_pkg in slot_nodes:
1025 if other_pkg in conflict_pkgs:
1028 dep = Dependency(atom=atom, child=other_pkg,
1029 parent=parent, root=pkg.root)
1031 if self._slot_operator_update_probe(dep, slot_conflict=True):
1032 self._slot_operator_update_backtrack(dep)
# NOTE(review): the lines that set found_update and return it
# (original lines 1033-1035) appear elided in this excerpt.
1037 def _slot_change_probe(self, dep):
# (Docstring delimiters not visible in this excerpt.)
1040 @return: True if dep.child should be rebuilt due to a change
1041 in sub-slot (without revbump, as in bug #456208).
# Only applies when a not-yet-built parent depends on a built child.
1043 if not (isinstance(dep.parent, Package) and \
1044 not dep.parent.built and dep.child.built):
1047 root_config = self._frozen_config.roots[dep.root]
# Prefer the ebuild from the child's own repo, if it still exists.
1050 matches.append(self._pkg(dep.child.cpv, "ebuild",
1051 root_config, myrepo=dep.child.repo))
1052 except PackageNotFound:
# Search the exact same cpv among all available ebuilds.
1055 for unbuilt_child in chain(matches,
1056 self._iter_match_pkgs(root_config, "ebuild",
1057 Atom("=%s" % (dep.child.cpv,)))):
1058 if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
1060 if self._frozen_config.excluded_pkgs.findAtomForPackage(
1062 modified_use=self._pkg_use_enabled(unbuilt_child)):
1064 if not self._pkg_visibility_check(unbuilt_child):
# Identical slot/sub-slot means nothing changed; no rebuild needed.
1070 if unbuilt_child.slot == dep.child.slot and \
1071 unbuilt_child.sub_slot == dep.child.sub_slot:
# A differing slot/sub-slot was found: return the replacement ebuild.
1074 return unbuilt_child
1076 def _slot_change_backtrack(self, dep, new_child_slot):
# Record backtracking info to rebuild dep.child with new_child_slot's
# slot/sub-slot (see _slot_change_probe).
# NOTE(review): the "child = dep.child" assignment appears elided from
# this excerpt (child is used below).
1078 if "--debug" in self._frozen_config.myopts:
1082 msg.append("backtracking due to slot/sub-slot change:")
1083 msg.append(" child package: %s" % child)
1084 msg.append(" child slot: %s/%s" %
1085 (child.slot, child.sub_slot))
1086 msg.append(" new child: %s" % new_child_slot)
1087 msg.append(" new child slot: %s/%s" %
1088 (new_child_slot.slot, new_child_slot.sub_slot))
1089 msg.append(" parent package: %s" % dep.parent)
1090 msg.append(" atom: %s" % dep.atom)
1092 writemsg_level("\n".join(msg),
1093 noiselevel=-1, level=logging.DEBUG)
1094 backtrack_infos = self._dynamic_config._backtrack_infos
1095 config = backtrack_infos.setdefault("config", {})
1097 # mask unwanted binary packages if necessary
1099 if not child.installed:
1100 masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
1102 config.setdefault("slot_operator_mask_built", {}).update(masks)
1104 # trigger replacement of installed packages if necessary
1107 replacement_atom = self._replace_installed_atom(child)
1108 if replacement_atom is not None:
1109 reinstalls.add((child.root, replacement_atom))
1111 config.setdefault("slot_operator_replace_installed",
1112 set()).update(reinstalls)
# Signal the caller loop to restart dependency calculation.
1114 self._dynamic_config._need_restart = True
1116 def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
# Record backtracking info for a missed slot-operator (:=) update.
# When new_child_slot is given, the child is replaced by a package in a
# different slot rather than rebuilt in place.
1117 if new_child_slot is None:
# NOTE(review): the "child = dep.child" assignment appears elided
# from this excerpt (original line ~1118).
1120 child = new_child_slot
1121 if "--debug" in self._frozen_config.myopts:
1125 msg.append("backtracking due to missed slot abi update:")
1126 msg.append(" child package: %s" % child)
1127 if new_child_slot is not None:
1128 msg.append(" new child slot package: %s" % new_child_slot)
1129 msg.append(" parent package: %s" % dep.parent)
1130 msg.append(" atom: %s" % dep.atom)
1132 writemsg_level("\n".join(msg),
1133 noiselevel=-1, level=logging.DEBUG)
1134 backtrack_infos = self._dynamic_config._backtrack_infos
1135 config = backtrack_infos.setdefault("config", {})
1137 # mask unwanted binary packages if necessary
1139 if new_child_slot is None:
1140 if not child.installed:
1141 abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
1142 if not dep.parent.installed:
1143 abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
1145 config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
1147 # trigger replacement of installed packages if necessary
1148 abi_reinstalls = set()
1149 if dep.parent.installed:
1150 replacement_atom = self._replace_installed_atom(dep.parent)
1151 if replacement_atom is not None:
1152 abi_reinstalls.add((dep.parent.root, replacement_atom))
1153 if new_child_slot is None and child.installed:
1154 replacement_atom = self._replace_installed_atom(child)
1155 if replacement_atom is not None:
1156 abi_reinstalls.add((child.root, replacement_atom))
1158 config.setdefault("slot_operator_replace_installed",
1159 set()).update(abi_reinstalls)
# Signal the caller loop to restart dependency calculation.
1161 self._dynamic_config._need_restart = True
1163 def _slot_operator_update_probe(self, dep, new_child_slot=False,
1164 slot_conflict=False):
# (Docstring delimiters not visible in this excerpt.)
1166 slot/sub-slot := operators tend to prevent updates from getting pulled in,
1167 since installed packages pull in packages with the slot/sub-slot that they
1168 were built against. Detect this case so that we can schedule rebuilds
1169 and reinstalls when appropriate.
1170 NOTE: This function only searches for updates that involve upgrades
1171 to higher versions, since the logic required to detect when a
1172 downgrade would be desirable is not implemented.
# Packages matched by --exclude are never candidates for rebuilds.
1175 if dep.child.installed and \
1176 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
1177 modified_use=self._pkg_use_enabled(dep.child)):
1180 if dep.parent.installed and \
1181 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1182 modified_use=self._pkg_use_enabled(dep.parent)):
1185 debug = "--debug" in self._frozen_config.myopts
1186 selective = "selective" in self._dynamic_config.myparams
1187 want_downgrade = None
# For each candidate replacement of the parent, look for a := dep on
# the same cp as dep.atom that could bind to a different child slot.
1189 for replacement_parent in self._iter_similar_available(dep.parent,
1190 dep.parent.slot_atom):
1192 for atom in replacement_parent.validated_atoms:
1193 if not atom.slot_operator == "=" or \
1195 atom.cp != dep.atom.cp:
1198 # Discard USE deps, we're only searching for an approximate
1199 # pattern, and dealing with USE states is too complex for
1201 atom = atom.without_use
1203 if replacement_parent.built and \
1204 portage.dep._match_slot(atom, dep.child):
1205 # Our selected replacement_parent appears to be built
1206 # for the existing child selection. So, discard this
1207 # parent and search for another.
# Probe candidate child packages matching the stripped atom.
1210 for pkg in self._iter_similar_available(
1212 if pkg.slot == dep.child.slot and \
1213 pkg.sub_slot == dep.child.sub_slot:
1214 # If slot/sub-slot is identical, then there's
1215 # no point in updating.
1218 if pkg.slot == dep.child.slot:
1221 # the new slot only matters if the
1222 # package version is higher
1225 if pkg.slot != dep.child.slot:
1228 if want_downgrade is None:
1229 want_downgrade = self._downgrade_probe(dep.child)
1230 # be careful not to trigger a rebuild when
1231 # the only version available with a
1232 # different slot_operator is an older version
1233 if not want_downgrade:
1236 insignificant = False
1237 if not slot_conflict and \
1239 dep.parent.installed and \
1240 dep.child.installed and \
1241 dep.parent.cpv == replacement_parent.cpv and \
1242 dep.child.cpv == pkg.cpv:
1243 # Then can happen if the child's sub-slot changed
1244 # without a revision bump. The sub-slot change is
1245 # considered insignificant until one of its parent
1246 # packages needs to be rebuilt (which may trigger a
1248 insignificant = True
# Debug trace for a successful probe.
1254 msg.append("slot_operator_update_probe:")
1255 msg.append(" existing child package: %s" % dep.child)
1256 msg.append(" existing parent package: %s" % dep.parent)
1257 msg.append(" new child package: %s" % pkg)
1258 msg.append(" new parent package: %s" % replacement_parent)
1260 msg.append("insignificant changes detected")
1262 writemsg_level("\n".join(msg),
1263 noiselevel=-1, level=logging.DEBUG)
# Debug trace when no update candidate was found.
1274 msg.append("slot_operator_update_probe:")
1275 msg.append(" existing child package: %s" % dep.child)
1276 msg.append(" existing parent package: %s" % dep.parent)
1277 msg.append(" new child package: %s" % None)
1278 msg.append(" new parent package: %s" % None)
1280 writemsg_level("\n".join(msg),
1281 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): return statements (original lines ~1265, ~1283) appear
# elided in this excerpt.
1285 def _slot_operator_unsatisfied_probe(self, dep):
# Probe whether an unsatisfied built slot-operator dep could be
# satisfied by rebuilding the parent against an available package.
1287 if dep.parent.installed and \
1288 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1289 modified_use=self._pkg_use_enabled(dep.parent)):
1292 debug = "--debug" in self._frozen_config.myopts
1294 for replacement_parent in self._iter_similar_available(dep.parent,
1295 dep.parent.slot_atom):
1297 for atom in replacement_parent.validated_atoms:
1298 if not atom.slot_operator == "=" or \
1300 atom.cp != dep.atom.cp:
1303 # Discard USE deps, we're only searching for an approximate
1304 # pattern, and dealing with USE states is too complex for
1306 atom = atom.without_use
# Try to select a package satisfying the stripped atom.
1308 pkg, existing_node = self._select_package(dep.root, atom,
1309 onlydeps=dep.onlydeps)
# Debug trace for a successful probe.
1317 msg.append("slot_operator_unsatisfied_probe:")
1318 msg.append(" existing parent package: %s" % dep.parent)
1319 msg.append(" existing parent atom: %s" % dep.atom)
1320 msg.append(" new parent package: %s" % replacement_parent)
1321 msg.append(" new child package: %s" % pkg)
1323 writemsg_level("\n".join(msg),
1324 noiselevel=-1, level=logging.DEBUG)
# Debug trace when no replacement was found.
1332 msg.append("slot_operator_unsatisfied_probe:")
1333 msg.append(" existing parent package: %s" % dep.parent)
1334 msg.append(" existing parent atom: %s" % dep.atom)
1335 msg.append(" new parent package: %s" % None)
1336 msg.append(" new child package: %s" % None)
1338 writemsg_level("\n".join(msg),
1339 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): return statements (original lines ~1325, ~1341) appear
# elided in this excerpt.
1343 def _slot_operator_unsatisfied_backtrack(self, dep):
# Record backtracking info so the parent of an unsatisfied built
# slot-operator dep gets rebuilt or reinstalled.
# NOTE(review): the "parent = dep.parent" assignment appears elided
# from this excerpt (parent is used below).
1347 if "--debug" in self._frozen_config.myopts:
1351 msg.append("backtracking due to unsatisfied "
1352 "built slot-operator dep:")
1353 msg.append(" parent package: %s" % parent)
1354 msg.append(" atom: %s" % dep.atom)
1356 writemsg_level("\n".join(msg),
1357 noiselevel=-1, level=logging.DEBUG)
1359 backtrack_infos = self._dynamic_config._backtrack_infos
1360 config = backtrack_infos.setdefault("config", {})
1362 # mask unwanted binary packages if necessary
1364 if not parent.installed:
1365 masks.setdefault(parent, {})["slot_operator_mask_built"] = None
1367 config.setdefault("slot_operator_mask_built", {}).update(masks)
1369 # trigger replacement of installed packages if necessary
1371 if parent.installed:
1372 replacement_atom = self._replace_installed_atom(parent)
1373 if replacement_atom is not None:
1374 reinstalls.add((parent.root, replacement_atom))
1376 config.setdefault("slot_operator_replace_installed",
1377 set()).update(reinstalls)
# Signal the caller loop to restart dependency calculation.
1379 self._dynamic_config._need_restart = True
1381 def _downgrade_probe(self, pkg):
# (Docstring delimiters not visible in this excerpt.)
1383 Detect cases where a downgrade of the given package is considered
1384 desirable due to the current version being masked or unavailable.
1386 available_pkg = None
1387 for available_pkg in self._iter_similar_available(pkg,
1389 if available_pkg >= pkg:
1390 # There's an available package of the same or higher
1391 # version, so downgrade seems undesirable.
# True only if the loop yielded something and every candidate was older.
1394 return available_pkg is not None
1396 def _iter_similar_available(self, graph_pkg, atom):
# (Docstring delimiters not visible in this excerpt.)
1398 Given a package that's in the graph, do a rough check to
1399 see if a similar package is available to install. The given
1400 graph_pkg itself may be yielded only if it's not installed.
1403 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
1404 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
1405 use_ebuild_visibility = self._frozen_config.myopts.get(
1406 '--use-ebuild-visibility', 'n') != 'n'
1408 for pkg in self._iter_match_pkgs_any(
1409 graph_pkg.root_config, atom):
1410 if pkg.cp != graph_pkg.cp:
1411 # discard old-style virtual match
# Skip packages that are masked, excluded, invisible, or already
# installed as an equivalent binary.
1415 if pkg in self._dynamic_config._runtime_pkg_mask:
1417 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
1418 modified_use=self._pkg_use_enabled(pkg)):
1420 if not self._pkg_visibility_check(pkg):
1423 if self._equiv_binary_installed(pkg):
# Unless binary-only modes apply, also require a visible
# equivalent ebuild.
1425 if not (not use_ebuild_visibility and
1426 (usepkgonly or useoldpkg_atoms.findAtomForPackage(
1427 pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
1428 not self._equiv_ebuild_visible(pkg):
# NOTE(review): the "yield pkg" statement (original line ~1430)
# appears elided in this excerpt.
1432 def _replace_installed_atom(self, inst_pkg):
# (Docstring delimiters not visible in this excerpt.)
1434 Given an installed package, generate an atom suitable for
1435 slot_operator_replace_installed backtracking info. The replacement
1436 SLOT may differ from the installed SLOT, so first search by cpv.
# First pass: exact-cpv candidates.
1439 for pkg in self._iter_similar_available(inst_pkg,
1440 Atom("=%s" % inst_pkg.cpv)):
1442 return pkg.slot_atom
1443 elif not pkg.installed:
1444 # avoid using SLOT from a built instance
1445 built_pkgs.append(pkg)
# Second pass: same-slot candidates.
1447 for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
1449 return pkg.slot_atom
1450 elif not pkg.installed:
1451 # avoid using SLOT from a built instance
1452 built_pkgs.append(pkg)
# Fall back to the highest-version built candidate, if any.
1456 for pkg in built_pkgs:
1457 if best_version is None or pkg > best_version:
1459 return best_version.slot_atom
1463 def _slot_operator_trigger_reinstalls(self):
# (Docstring delimiters not visible in this excerpt.)
1465 Search for packages with slot-operator deps on older slots, and schedule
1466 rebuilds if they can link to a newer slot that's in the graph.
# rebuild_if_new_slot defaults to enabled.
1469 rebuild_if_new_slot = self._dynamic_config.myparams.get(
1470 "rebuild_if_new_slot", "y") == "y"
1471 for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
1474 for dep in slot_info:
1477 if atom.slot_operator is None:
# A dep not built with := may still need a rebuild for a
# sub-slot change without revbump (bug #456208).
1480 if not atom.slot_operator_built:
1481 new_child_slot = self._slot_change_probe(dep)
1482 if new_child_slot is not None:
1483 self._slot_change_backtrack(dep, new_child_slot)
1486 if not (dep.parent and
1487 isinstance(dep.parent, Package) and dep.parent.built):
1490 # Check for slot update first, since we don't want to
1491 # trigger reinstall of the child package when a newer
1492 # slot will be used instead.
1493 if rebuild_if_new_slot:
1494 new_child = self._slot_operator_update_probe(dep,
1495 new_child_slot=True)
1497 self._slot_operator_update_backtrack(dep,
1498 new_child_slot=new_child)
# Same-slot update probe: rebuild the parent in place.
1502 if self._slot_operator_update_probe(dep):
1503 self._slot_operator_update_backtrack(dep)
1506 def _reinstall_for_flags(self, pkg, forced_flags,
1507 orig_use, orig_iuse, cur_use, cur_iuse):
1508 """Return a set of flags that trigger reinstallation, or None if there
1509 are no such flags."""
1511 # binpkg_respect_use: Behave like newuse by default. If newuse is
1512 # False and changed_use is True, then behave like changed_use.
1513 binpkg_respect_use = (pkg.built and
1514 self._dynamic_config.myparams.get("binpkg_respect_use")
1516 newuse = "--newuse" in self._frozen_config.myopts
1517 changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
# FEATURES-injected flags never trigger a reinstall by themselves.
1518 feature_flags = _get_feature_flags(
1519 _get_eapi_attrs(pkg.eapi))
1521 if newuse or (binpkg_respect_use and not changed_use):
# --newuse: consider both IUSE membership changes and changes in
# the enabled state of flags present in IUSE.
1522 flags = set(orig_iuse.symmetric_difference(
1523 cur_iuse).difference(forced_flags))
1524 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
1525 cur_iuse.intersection(cur_use)))
1526 flags.difference_update(feature_flags)
1530 elif changed_use or binpkg_respect_use:
# changed-use: only changes in enabled state of IUSE flags count.
1531 flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
1532 cur_iuse.intersection(cur_use)))
1533 flags.difference_update(feature_flags)
# NOTE(review): the "if flags: return flags" / "return None" lines
# (original lines ~1527-1528, ~1534-1536) appear elided in this excerpt.
1538 def _create_graph(self, allow_unsatisfied=False):
# Main work loop: drain the dependency stacks, expanding packages and
# deps until the graph is complete (or a handler reports failure).
1539 dep_stack = self._dynamic_config._dep_stack
1540 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
1541 while dep_stack or dep_disjunctive_stack:
1542 self._spinner_update()
1544 dep = dep_stack.pop()
# Stack entries are either Package nodes or Dependency objects.
1545 if isinstance(dep, Package):
1546 if not self._add_pkg_deps(dep,
1547 allow_unsatisfied=allow_unsatisfied):
1550 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Process one || dep group at a time.
1552 if dep_disjunctive_stack:
1553 if not self._pop_disjunction(allow_unsatisfied):
# NOTE(review): return statements (original lines ~1548, ~1551, ~1554-1555)
# appear elided in this excerpt.
1557 def _expand_set_args(self, input_args, add_to_digraph=False):
# (Docstring delimiters not visible in this excerpt.)
1559 Iterate over a list of DependencyArg instances and yield all
1560 instances given in the input together with additional SetArg
1561 instances that are generated from nested sets.
1562 @param input_args: An iterable of DependencyArg instances
1563 @type input_args: Iterable
1564 @param add_to_digraph: If True then add SetArg instances
1565 to the digraph, in order to record parent -> child
1566 relationships from nested sets
1567 @type add_to_digraph: Boolean
1569 @return: All args given in the input together with additional
1570 SetArg instances that are generated from nested sets
# Guards against cycles among nested sets.
1573 traversed_set_args = set()
1575 for arg in input_args:
# Non-set args are passed through untouched (yield appears elided
# from this excerpt).
1576 if not isinstance(arg, SetArg):
1580 root_config = arg.root_config
1581 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Depth-first traversal of nested sets via an explicit stack.
1584 arg = arg_stack.pop()
1585 if arg in traversed_set_args:
1587 traversed_set_args.add(arg)
1590 self._dynamic_config.digraph.add(arg, None,
1591 priority=BlockerDepPriority.instance)
1595 # Traverse nested sets and add them to the stack
1596 # if they're not already in the graph. Also, graph
1597 # edges between parent and nested sets.
1598 for token in arg.pset.getNonAtoms():
1599 if not token.startswith(SETPREFIX):
1601 s = token[len(SETPREFIX):]
# Prefer a set already registered for this depgraph run;
# fall back to the root's configured sets.
1602 nested_set = depgraph_sets.sets.get(s)
1603 if nested_set is None:
1604 nested_set = root_config.sets.get(s)
1605 if nested_set is not None:
1606 nested_arg = SetArg(arg=token, pset=nested_set,
1607 root_config=root_config)
1608 arg_stack.append(nested_arg)
1610 self._dynamic_config.digraph.add(nested_arg, arg,
1611 priority=BlockerDepPriority.instance)
1612 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
1614 def _add_dep(self, dep, allow_unsatisfied=False):
# Resolve a single Dependency: blockers are registered, normal deps
# are matched to a package and recursed into via _add_pkg.
1615 debug = "--debug" in self._frozen_config.myopts
1616 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
1617 nodeps = "--nodeps" in self._frozen_config.myopts
# Blocker handling (the "if dep.blocker:" guard appears elided from
# this excerpt).
1619 if not buildpkgonly and \
1621 not dep.collapsed_priority.ignored and \
1622 not dep.collapsed_priority.optional and \
1623 dep.parent not in self._dynamic_config._slot_collision_nodes:
1624 if dep.parent.onlydeps:
1625 # It's safe to ignore blockers if the
1626 # parent is an --onlydeps node.
1628 # The blocker applies to the root where
1629 # the parent is or will be installed.
1630 blocker = Blocker(atom=dep.atom,
1631 eapi=dep.parent.eapi,
1632 priority=dep.priority, root=dep.parent.root)
1633 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
1636 if dep.child is None:
1637 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
1638 onlydeps=dep.onlydeps)
1640 # The caller has selected a specific package
1641 # via self._minimize_packages().
1643 existing_node = self._dynamic_config._slot_pkg_map[
1644 dep.root].get(dep_pkg.slot_atom)
# No package satisfied the atom (the "if not dep_pkg:" guard
# appears elided from this excerpt).
1647 if (dep.collapsed_priority.optional or
1648 dep.collapsed_priority.ignored):
1649 # This is an unnecessary build-time dep.
1651 if allow_unsatisfied:
1652 self._dynamic_config._unsatisfied_deps.append(dep)
1654 self._dynamic_config._unsatisfied_deps_for_display.append(
1655 ((dep.root, dep.atom), {"myparent":dep.parent}))
1657 # The parent node should not already be in
1658 # runtime_pkg_mask, since that would trigger an
1659 # infinite backtracking loop.
1660 if self._dynamic_config._allow_backtracking:
1661 if dep.parent in self._dynamic_config._runtime_pkg_mask:
1664 "!!! backtracking loop detected: %s %s\n" % \
1666 self._dynamic_config._runtime_pkg_mask[
1667 dep.parent]), noiselevel=-1)
1668 elif dep.atom.slot_operator_built and \
1669 self._slot_operator_unsatisfied_probe(dep):
1670 self._slot_operator_unsatisfied_backtrack(dep)
1672 elif not self.need_restart():
1673 # Do not backtrack if only USE have to be changed in
1674 # order to satisfy the dependency.
1675 dep_pkg, existing_node = \
1676 self._select_package(dep.root, dep.atom.without_use,
1677 onlydeps=dep.onlydeps)
1679 self._dynamic_config._backtrack_infos["missing dependency"] = dep
1680 self._dynamic_config._need_restart = True
1685 msg.append("backtracking due to unsatisfied dep:")
1686 msg.append(" parent: %s" % dep.parent)
1687 msg.append(" priority: %s" % dep.priority)
1688 msg.append(" root: %s" % dep.root)
1689 msg.append(" atom: %s" % dep.atom)
1691 writemsg_level("".join("%s\n" % l for l in msg),
1692 noiselevel=-1, level=logging.DEBUG)
# Record potential rebuild triggers, then recurse into the package
# unless the dep is ignored and ignored deps aren't being traversed.
1696 self._rebuild.add(dep_pkg, dep)
1698 ignore = dep.collapsed_priority.ignored and \
1699 not self._dynamic_config._traverse_ignored_deps
1700 if not ignore and not self._add_pkg(dep_pkg, dep):
# NOTE(review): several return statements appear elided from this excerpt.
1704 def _check_slot_conflict(self, pkg, atom):
# Look up any package already occupying pkg's slot, and decide whether
# it can be reused to satisfy the given atom.
1705 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
# Identical cpv means the existing node trivially matches.
1708 matches = pkg.cpv == existing_node.cpv
1709 if pkg != existing_node and \
1711 # Use package set for matching since it will match via
1712 # PROVIDE when necessary, while match_from_list does not.
1713 matches = bool(InternalPackageSet(initial_atoms=(atom,),
1714 allow_repo=True).findAtomForPackage(existing_node,
1715 modified_use=self._pkg_use_enabled(existing_node)))
1717 return (existing_node, matches)
1719 def _add_pkg(self, pkg, dep):
# (Docstring delimiters not visible in this excerpt.)
1721 Adds a package to the depgraph, queues dependencies, and handles
1724 debug = "--debug" in self._frozen_config.myopts
1731 myparent = dep.parent
1732 priority = dep.priority
1734 if priority is None:
1735 priority = DepPriority()
# Debug trace of the child/parent relationship being added.
1739 "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
1740 pkg_use_display(pkg, self._frozen_config.myopts,
1741 modified_use=self._pkg_use_enabled(pkg))),
1742 level=logging.DEBUG, noiselevel=-1)
1743 if isinstance(myparent,
1744 (PackageArg, AtomArg)):
1745 # For PackageArg and AtomArg types, it's
1746 # redundant to display the atom attribute.
1748 "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
1749 level=logging.DEBUG, noiselevel=-1)
1751 # Display the specific atom from SetArg or
1754 if dep.atom is not dep.atom.unevaluated_atom:
1755 uneval = " (%s)" % (dep.atom.unevaluated_atom,)
1757 "%s%s%s required by %s\n" %
1758 ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
1759 level=logging.DEBUG, noiselevel=-1)
1761 # Ensure that the dependencies of the same package
1762 # are never processed more than once.
1763 previously_added = pkg in self._dynamic_config.digraph
1765 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# Collect command-line argument atoms matching this package.
1770 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1771 except portage.exception.InvalidDependString as e:
1772 if not pkg.installed:
1773 # should have been masked before it was selected
1777 # NOTE: REQUIRED_USE checks are delayed until after
1778 # package selection, since we want to prompt the user
1779 # for USE adjustment rather than have REQUIRED_USE
1780 # affect package selection and || dep choices.
1781 if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
1782 eapi_has_required_use(pkg.eapi):
1783 required_use_is_sat = check_required_use(
1784 pkg._metadata["REQUIRED_USE"],
1785 self._pkg_use_enabled(pkg),
1786 pkg.iuse.is_valid_flag,
1788 if not required_use_is_sat:
# Record parent relationships so the failure can be displayed.
1789 if dep.atom is not None and dep.parent is not None:
1790 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1793 for parent_atom in arg_atoms:
1794 parent, atom = parent_atom
1795 self._add_parent_atom(pkg, parent_atom)
1799 atom = Atom("=" + pkg.cpv)
1800 self._dynamic_config._unsatisfied_deps_for_display.append(
1802 {"myparent" : dep.parent, "show_req_use" : pkg}))
1803 self._dynamic_config._skip_restart = True
1806 if not pkg.onlydeps:
# Check whether another package already occupies this slot.
1808 existing_node, existing_node_matches = \
1809 self._check_slot_conflict(pkg, dep.atom)
1810 slot_collision = False
1812 if existing_node_matches:
1813 # The existing node can be reused.
1814 if pkg != existing_node:
1816 previously_added = True
1818 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1819 except InvalidDependString as e:
1820 if not pkg.installed:
1821 # should have been masked before
1827 "%s%s %s\n" % ("Re-used Child:".ljust(15),
1828 pkg, pkg_use_display(pkg,
1829 self._frozen_config.myopts,
1830 modified_use=self._pkg_use_enabled(pkg))),
1831 level=logging.DEBUG, noiselevel=-1)
# Existing node does not match: record a slot collision.
1834 self._add_slot_conflict(pkg)
1837 "%s%s %s\n" % ("Slot Conflict:".ljust(15),
1838 existing_node, pkg_use_display(existing_node,
1839 self._frozen_config.myopts,
1840 modified_use=self._pkg_use_enabled(existing_node))),
1841 level=logging.DEBUG, noiselevel=-1)
1843 slot_collision = True
1846 # Now add this node to the graph so that self.display()
1847 # can show use flags and --tree portage.output. This node is
1848 # only being partially added to the graph. It must not be
1849 # allowed to interfere with the other nodes that have been
1850 # added. Do not overwrite data for existing nodes in
1851 # self._dynamic_config.mydbapi since that data will be used for blocker
1853 # Even though the graph is now invalid, continue to process
1854 # dependencies so that things like --fetchonly can still
1855 # function despite collisions.
1857 elif not previously_added:
# First time this slot is filled: register in all lookup tables
# and invalidate caches that depend on package visibility.
1858 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1859 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1860 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1861 self._dynamic_config._highest_pkg_cache.clear()
1862 self._check_masks(pkg)
1864 if not pkg.installed:
1865 # Allow this package to satisfy old-style virtuals in case it
1866 # doesn't already. Any pre-existing providers will be preferred
1869 pkgsettings.setinst(pkg.cpv, pkg._metadata)
1870 # For consistency, also update the global virtuals.
1871 settings = self._frozen_config.roots[pkg.root].settings
1873 settings.setinst(pkg.cpv, pkg._metadata)
1875 except portage.exception.InvalidDependString:
1876 if not pkg.installed:
1877 # should have been masked before it was selected
# Track argument-selected packages for set handling.
1881 self._dynamic_config._set_nodes.add(pkg)
1883 # Do this even for onlydeps, so that the
1884 # parent/child relationship is always known in case
1885 # self._show_slot_collision_notice() needs to be called later.
1886 # If a direct circular dependency is not an unsatisfied
1887 # buildtime dependency then drop it here since otherwise
1888 # it can skew the merge order calculation in an unwanted
1890 if pkg != dep.parent or \
1891 (priority.buildtime and not priority.satisfied):
1892 self._dynamic_config.digraph.add(pkg,
1893 dep.parent, priority=priority)
1894 if dep.atom is not None and dep.parent is not None:
1895 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1898 for parent_atom in arg_atoms:
1899 parent, atom = parent_atom
1900 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1901 self._add_parent_atom(pkg, parent_atom)
1903 # This section determines whether we go deeper into dependencies or not.
1904 # We want to go deeper on a few occasions:
1905 # Installing package A, we need to make sure package A's deps are met.
1906 # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1907 # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1908 if arg_atoms and depth > 0:
1909 for parent, atom in arg_atoms:
1910 if parent.reset_depth:
# Keep the shallowest depth seen for a previously-added package.
1914 if previously_added and pkg.depth is not None:
1915 depth = min(pkg.depth, depth)
1917 deep = self._dynamic_config.myparams.get("deep", 0)
1918 update = "--update" in self._frozen_config.myopts
1920 dep.want_update = (not self._dynamic_config._complete_mode and
1921 (arg_atoms or update) and
1922 not (deep is not True and depth > deep))
# Register slot-operator deps for later reinstall probing.
1925 if (not pkg.onlydeps and
1926 dep.atom and dep.atom.slot_operator is not None):
1927 self._add_slot_operator_dep(dep)
1929 recurse = deep is True or depth + 1 <= deep
1930 dep_stack = self._dynamic_config._dep_stack
1931 if "recurse" not in self._dynamic_config.myparams:
1933 elif pkg.installed and not recurse:
# Defer deps of installed packages outside the recursion depth.
1934 dep_stack = self._dynamic_config._ignored_deps
1936 self._spinner_update()
1938 if not previously_added:
1939 dep_stack.append(pkg)
# NOTE(review): numerous lines (returns, try statements, depth setup)
# appear elided throughout this excerpt.
1942 def _check_masks(self, pkg):
# Record a missed-update notice when a newer version in the same slot
# is masked solely by a LICENSE change.
1944 slot_key = (pkg.root, pkg.slot_atom)
1946 # Check for upgrades in the same slot that are
1947 # masked due to a LICENSE change in a newer
1948 # version that is not masked for any other reason.
1949 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1950 if other_pkg is not None and pkg < other_pkg:
1951 self._dynamic_config._masked_license_updates.add(other_pkg)
1953 def _add_parent_atom(self, pkg, parent_atom):
# Register a (parent, atom) pair as having pulled pkg into the graph,
# creating the per-package set on first use.
1954 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1955 if parent_atoms is None:
1956 parent_atoms = set()
1957 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1958 parent_atoms.add(parent_atom)
1960 def _add_slot_operator_dep(self, dep):
# Index a slot-operator dep by (root, child slot atom) for later use
# by _slot_operator_trigger_reinstalls.
1961 slot_key = (dep.root, dep.child.slot_atom)
1962 slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
1963 if slot_info is None:
# NOTE(review): the "slot_info = []" initialization appears elided
# from this excerpt (original line 1964).
1965 self._dynamic_config._slot_operator_deps[slot_key] = slot_info
1966 slot_info.append(dep)
1968 def _add_slot_conflict(self, pkg):
# Record pkg as colliding with the package currently occupying its
# slot; both end up in _slot_collision_info for later processing.
1969 self._dynamic_config._slot_collision_nodes.add(pkg)
1970 slot_key = (pkg.slot_atom, pkg.root)
1971 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1972 if slot_nodes is None:
# NOTE(review): the "slot_nodes = set()" initialization and the
# "slot_nodes.add(pkg)" line appear elided from this excerpt.
1974 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1975 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """Expand pkg's dependency strings (DEPEND/RDEPEND/PDEPEND/HDEPEND)
    and feed the results to _add_pkg_dep_string().

    NOTE(review): this excerpt elides several lines (initialization of
    myroot/edepend/deps, try openers, continue/return statements), so
    indentation below is best-effort.
    """
    metadata = pkg._metadata
    # A "remove" param means an unmerge graph is being computed.
    removal_action = "remove" in self._dynamic_config.myparams
    eapi_attrs = _get_eapi_attrs(pkg.eapi)

    # Copy every dependency class out of the package metadata.
    for k in Package._dep_keys:
        edepend[k] = metadata[k]

    # With --buildpkgonly, runtime deps are not needed unless "deep"
    # traversal was requested.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # NOTE(review): this assignment likely belongs to an elided
            # else branch (bdeps != "y") -- confirm upstream.
            ignore_build_time_deps = True

    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        edepend["HDEPEND"] = ""
        ignore_build_time_deps = True

    ignore_depend_deps = ignore_build_time_deps
    ignore_hdepend_deps = ignore_build_time_deps

    # Choose the root that DEPEND applies to; HDEPEND (EAPI 5-hdepend)
    # always targets the host/running root.
    depend_root = myroot
    if eapi_attrs.hdepend:
        depend_root = myroot
        # NOTE(review): an else branch appears to be elided here.
        depend_root = self._frozen_config._running_root.root
        root_deps = self._frozen_config.myopts.get("--root-deps")
        if root_deps is not None:
            if root_deps is True:
                depend_root = myroot
            elif root_deps == "rdeps":
                ignore_depend_deps = True

    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if not self._rebuild.rebuild:
        if ignore_depend_deps:
            edepend["DEPEND"] = ""
        if ignore_hdepend_deps:
            edepend["HDEPEND"] = ""

    # (dep_root, dep_string, priority) triples; the opener of this
    # list literal is elided from this excerpt.
        (depend_root, edepend["DEPEND"],
            self._priority(buildtime=True,
            optional=(pkg.built or ignore_depend_deps),
            ignored=ignore_depend_deps)),
        (self._frozen_config._running_root.root, edepend["HDEPEND"],
            self._priority(buildtime=True,
            optional=(pkg.built or ignore_hdepend_deps),
            ignored=ignore_hdepend_deps)),
        (myroot, edepend["RDEPEND"],
            self._priority(runtime=True)),
        (myroot, edepend["PDEPEND"],
            self._priority(runtime_post=True))

    debug = "--debug" in self._frozen_config.myopts

    for dep_root, dep_string, dep_priority in deps:
            # Debug trace for each dependency class being processed.
            writemsg_level("\nParent: %s\n" % (pkg,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Depstring: %s\n" % (dep_string,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Priority: %s\n" % (dep_priority,),
                noiselevel=-1, level=logging.DEBUG)

            # Evaluate USE conditionals and normalize into Atom tokens.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg),
                is_valid_flag=pkg.iuse.is_valid_flag,
                opconvert=True, token_class=Atom,
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected

            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg),
                opconvert=True, token_class=Atom,
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)

            # Defer || and virtual deps, keeping only the
            # non-disjunctive remainder for immediate processing.
            dep_string = list(self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)

            # should have been masked before it was selected

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,

    # Mark pkg so its deps are not expanded again.
    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    # NOTE(review): the signature continuation (allow_unsatisfied) and
    # the try/finally wrapper are elided from this excerpt.
    # Autounmask is disabled while processing deps that do not strictly
    # need to be satisfied, then restored afterwards.
    _autounmask_backup = self._dynamic_config._autounmask
    if dep_priority.optional or dep_priority.ignored:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    return self._wrapped_add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string,
    # Restore the saved autounmask state (finally block in the full
    # source).
    self._dynamic_config._autounmask = _autounmask_backup
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
    dep_string, allow_unsatisfied):
    """Resolve dep_string for pkg, build Dependency objects for each
    selected atom, and add them (plus indirect virtual deps) to the
    graph.

    NOTE(review): several lines are elided from this excerpt (try
    openers, continue/return statements, debug guards); indentation is
    best-effort.
    """
    depth = pkg.depth + 1
    deep = self._dynamic_config.myparams.get("deep", 0)
    # Whether deps that are already satisfied by installed packages
    # should still be traversed at this depth.
    recurse_satisfied = deep is True or depth <= deep
    debug = "--debug" in self._frozen_config.myopts
    # Installed packages get relaxed (non-strict) dep validation.
    strict = pkg.type_name != "installed"

        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        dep_repr = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True, opconvert=True)
        writemsg_level("Depstring: %s\n" % (dep_repr,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # Let dep_check select concrete atoms for this dep string.
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
            strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString:
        self._dynamic_config._masked_installed.add(pkg)

        # should have been masked before it was selected

        writemsg_level("Candidates: %s\n" % \
            ([str(x) for x in selected_atoms[pkg]],),
            noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    traversed_virt_pkgs = set()

    reinstall_atoms = self._frozen_config.reinstall_atoms
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        # If this was a specially generated virtual atom
        # from dep_check, map it back to the original, in
        # order to avoid distortion in places like display
        # or conflict resolution code.
        is_virt = hasattr(atom, '_orig_atom')
        atom = getattr(atom, '_orig_atom', atom)

        if atom.blocker and \
            (dep_priority.optional or dep_priority.ignored):
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.

        mypriority = dep_priority.copy()
        if not atom.blocker:
            # Installed matches, newest first, excluding packages that
            # are explicitly queued for reinstall.
            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]

        dep = Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root)

        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if not atom.blocker and \
            not recurse_satisfied and \
            mypriority.satisfied and \
            mypriority.satisfied.visible and \
            dep.child is not None and \
            not dep.child.installed and \
            self._dynamic_config._slot_pkg_map[dep.child.root].get(
            dep.child.slot_atom) is None:
                myarg = next(self._iter_atoms_for_pkg(dep.child), None)
            except InvalidDependString:
                if not dep.child.installed:

            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
            self._dynamic_config._ignored_deps.append(dep)

        if dep_priority.ignored and \
            not self._dynamic_config._traverse_ignored_deps:
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)
            self._dynamic_config._ignored_deps.append(dep)

        if not self._add_dep(dep,
            allow_unsatisfied=allow_unsatisfied):
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)

    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_dep, atoms in selected_atoms.items():
        virt_pkg = virt_dep.child
        if virt_pkg not in traversed_virt_pkgs:

            writemsg_level("\nCandidates: %s: %s\n" % \
                (virt_pkg.cpv, [str(x) for x in atoms]),
                noiselevel=-1, level=logging.DEBUG)

        if not dep_priority.ignored or \
            self._dynamic_config._traverse_ignored_deps:

            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(virt_dep.atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    virt_dep.priority.satisfied = inst_pkg
            if not virt_dep.priority.satisfied:
                # none visible, so use highest
                virt_dep.priority.satisfied = inst_pkgs[0]

            if not self._add_pkg(virt_pkg, virt_dep):

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):

            # If this was a specially generated virtual atom
            # from dep_check, map it back to the original, in
            # order to avoid distortion in places like display
            # or conflict resolution code.
            is_virt = hasattr(atom, '_orig_atom')
            atom = getattr(atom, '_orig_atom', atom)

            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = [inst_pkg for inst_pkg in
                    reversed(vardb.match_pkgs(atom))
                    if not reinstall_atoms.findAtomForPackage(inst_pkg,
                    modified_use=self._pkg_use_enabled(inst_pkg))]
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

            # Dependencies of virtuals are considered to have the
            # same depth as the virtual itself.
            dep = Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_dep.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root,
                collapsed_parent=pkg, collapsed_priority=dep_priority)

            if not atom.blocker and \
                not recurse_satisfied and \
                mypriority.satisfied and \
                mypriority.satisfied.visible and \
                dep.child is not None and \
                not dep.child.installed and \
                self._dynamic_config._slot_pkg_map[dep.child.root].get(
                dep.child.slot_atom) is None:
                    myarg = next(self._iter_atoms_for_pkg(dep.child), None)
                except InvalidDependString:
                    if not dep.child.installed:

                self._dynamic_config._ignored_deps.append(dep)

            if dep_priority.ignored and \
                not self._dynamic_config._traverse_ignored_deps:
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)
                self._dynamic_config._ignored_deps.append(dep)

            if not self._add_dep(dep,
                allow_unsatisfied=allow_unsatisfied):
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)

        writemsg_level("\nExiting... %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify

    NOTE(review): lines are elided from this excerpt (map/list
    initializations, loop openers, continue/break/yield statements);
    indentation is best-effort.
    """
        # Select a package for each atom.
        dep_pkg, existing_node = self._select_package(
            root_config.root, atom)
        atom_pkg_map[atom] = dep_pkg

    # With fewer than two selections there is nothing to minimize.
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():

    # Group atoms by selected package and packages by cat/pkg name.
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for pkgs in cp_pkg_map.values():
            for atom in pkg_atom_map[pkg]:

        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
            for atom in pkg_atom_map[pkg1]:
                atom_pkg_graph.add(pkg1, atom)
                atom_set = InternalPackageSet(initial_atoms=(atom,),
                    # Connect other packages that also satisfy the atom.
                    if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
                        atom_pkg_graph.add(pkg2, atom)

            # A package is redundant only if every atom that selected
            # it is also satisfied by some other selected package.
            eliminate_pkg = True
            for atom in atom_pkg_graph.parent_nodes(pkg):
                if len(atom_pkg_graph.child_nodes(atom)) < 2:
                    eliminate_pkg = False
                atom_pkg_graph.remove(pkg)

    # Yield ~, =*, < and <= atoms first, since those are more likely to
    # cause slot conflicts, and we want those atoms to be displayed
    # in the resulting slot conflict message (see bug #291142).
    # Give similar treatment to slot/sub-slot atoms.
    for atom in cp_atoms:
        if atom.slot_operator_built:
            abi_atoms.append(atom)
        for child_pkg in atom_pkg_graph.child_nodes(atom):
            existing_node, matches = \
                self._check_slot_conflict(child_pkg, atom)
            if existing_node and not matches:
            conflict_atoms.append(atom)
            normal_atoms.append(atom)

    for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
        child_pkgs = atom_pkg_graph.child_nodes(atom)
        # if more than one child, yield highest version
        if len(child_pkgs) > 1:
            yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when

    NOTE(review): yield statements are elided from this excerpt.
    """
    for x in dep_struct:
        if isinstance(x, list):
            if x and x[0] == "||":
                # Any-of group: defer the whole group.
                self._queue_disjunction(pkg, dep_root, dep_priority, [x])
                # Recurse into nested all-of groups.
                for y in self._queue_disjunctive_deps(
                    pkg, dep_root, dep_priority, x):
            # Note: Eventually this will check for PROPERTIES=virtual
            # or whatever other metadata gets implemented for this
            if x.cp.startswith('virtual/'):
                # Old-style virtual atoms are disjunctive too.
                self._queue_disjunction(pkg, dep_root, dep_priority, [x])
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
    """Defer a disjunctive dependency group for later evaluation."""
    entry = (pkg, dep_root, dep_priority, dep_struct)
    self._dynamic_config._dep_disjunctive_stack.append(entry)
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.

    NOTE(review): the body of the if below (and the return values) are
    elided from this excerpt.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
def _priority(self, **kwargs):
    """Build a dep priority object appropriate for the current
    operation: unmerge priorities during a "remove" run, normal
    DepPriority otherwise."""
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    # NOTE(review): an else branch is elided from this excerpt.
    priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @return: a list of atoms containing categories (possibly empty)

    NOTE(review): the categories/deps initializations and the return
    statement are elided from this excerpt.
    """
    # Insert a dummy "null" category so the package-name part can be
    # split out of the otherwise category-less atom.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    # Collect every category that contains a package with this name.
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):

    # Build one fully-qualified atom per matching category.
    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat), allow_repo=True))
def _have_new_virt(self, root, atom_cp):
    # Scan the filtered dbs of the given root for a package matching
    # atom_cp. NOTE(review): the method's tail (result handling and
    # return) is elided from this excerpt -- presumably it returns
    # True when a match is found; confirm upstream.
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """Iterate over the argument-set atoms that match pkg.

    NOTE(review): continue/break/yield statements and some assignments
    are elided from this excerpt; indentation is best-effort.
    """
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        # Skip atoms whose category/package was taken over by a
        # new-style virtual.
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
            self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        # Look for a visible, higher version in a different slot.
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list.

    NOTE(review): this excerpt elides many lines (the myfiles loop
    opener, args/myfavorites/lookup_owners initializations, else
    branches, early returns); indentation below is best-effort.
    """
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]

    eroot = root_config.root
    root = root_config.settings['ROOT']
    vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[eroot]

    onlydeps = "--onlydeps" in self._frozen_config.myopts
    # Each argument x is classified by its file extension / shape.
        ext = os.path.splitext(x)[1]
            # Binary package argument: locate the tbz2 under PKGDIR.
            if not os.path.exists(x):
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                    writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
                    writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
                    return 0, myfavorites
            mytbz2=portage.xpak.tbz2(x)
            # Derive the cat/pkg key from the embedded CATEGORY entry.
            cat = mytbz2.getfile("CATEGORY")
                cat = _unicode_decode(cat.strip(),
                    encoding=_encodings['repo.content'])
                mykey = cat + "/" + os.path.basename(x)[:-5]
                writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, myfavorites
            elif os.path.realpath(x) != \
                os.path.realpath(bindb.bintree.getname(mykey)):
                writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, myfavorites

            pkg = self._pkg(mykey, "binary", root_config,
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            # Raw ebuild path argument: validate it lives in a proper
            # tree hierarchy and resolves through portdb.
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
                "hierarchy or does not exist\n") % x
            if not portage.isvalidatom(cp):
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            cat = portage.catsplit(cp)[0]
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            ebuild_path = portdb.findname(mykey)
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
                    self._dynamic_config._skip_restart = True
                    return 0, myfavorites
                if mykey not in portdb.xmatch(
                    "match-visible", portage.cpv_getkey(mykey)):
                    writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
                    writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
                    writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
                    countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            pkg = self._pkg(mykey, "ebuild", root_config,
                onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
                os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            # Absolute filesystem path: must live under EROOT; owner
            # lookup happens later in a single batch.
            if not x.startswith(eroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $EROOT.\n") % x, noiselevel=-1)
                self._dynamic_config._skip_restart = True
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
        elif x.startswith("." + os.sep) or \
            x.startswith(".." + os.sep):
            # Relative path: resolve and queue for owner lookup.
            f = os.path.abspath(x)
            if not f.startswith(eroot):
                portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
                    " $EROOT.\n") % (f, x), noiselevel=-1)
                self._dynamic_config._skip_restart = True
            lookup_owners.append(f)
            if x in ("system", "world"):
            if x.startswith(SETPREFIX):
                # Package set argument (@set-name).
                s = x[len(SETPREFIX):]
                    raise portage.exception.PackageSetNotFound(s)
                if s in depgraph_sets.sets:
                depgraph_sets.sets[s] = pset
                args.append(SetArg(arg=x, pset=pset,
                    root_config=root_config))
            if not is_valid_package_atom(x, allow_repo=True):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                self._dynamic_config._skip_restart = True

            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            #   1) It's more difficult to determine which set(s) an atom
            #      came from, if any.
            #   2) It takes away freedom from the resolver to choose other
            #      possible expansions when necessary.
            if "/" in x.split(":")[0]:
                args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
                    root_config=root_config))
            expanded_atoms = self._dep_expand(root_config, x)
            installed_cp_set = set()
            for atom in expanded_atoms:
                if vardb.cp_list(atom.cp):
                    installed_cp_set.add(atom.cp)

            if len(installed_cp_set) > 1:
                # Prefer the single non-virtual candidate when the
                # ambiguity is only between a package and virtuals.
                non_virtual_cps = set()
                for atom_cp in installed_cp_set:
                    if not atom_cp.startswith("virtual/"):
                        non_virtual_cps.add(atom_cp)
                if len(non_virtual_cps) == 1:
                    installed_cp_set = non_virtual_cps

            if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                installed_cp = next(iter(installed_cp_set))
                for atom in expanded_atoms:
                    if atom.cp == installed_cp:
                        for pkg in self._iter_match_pkgs_any(
                            root_config, atom.without_use,
                            if not pkg.installed:
                expanded_atoms = [atom]

            # If a non-virtual package and one or more virtual packages
            # are in expanded_atoms, use the non-virtual package.
            if len(expanded_atoms) > 1:
                number_of_virtuals = 0
                for expanded_atom in expanded_atoms:
                    if expanded_atom.cp.startswith("virtual/"):
                        number_of_virtuals += 1
                        candidate = expanded_atom
                if len(expanded_atoms) - number_of_virtuals == 1:
                    expanded_atoms = [ candidate ]

            if len(expanded_atoms) > 1:
                # Still ambiguous: report and bail out.
                writemsg("\n\n", noiselevel=-1)
                ambiguous_package_name(x, expanded_atoms, root_config,
                    self._frozen_config.spinner, self._frozen_config.myopts)
                self._dynamic_config._skip_restart = True
                return False, myfavorites
                atom = expanded_atoms[0]
                null_atom = Atom(insert_category_into_atom(x, "null"),
                cat, atom_pn = portage.catsplit(null_atom.cp)
                virts_p = root_config.settings.get_virts_p().get(atom_pn)
                    # Allow the depgraph to choose which virtual.
                    atom = Atom(null_atom.replace('null/', 'virtual/', 1),

            if atom.use and atom.use.conditional:
                    ("\n\n!!! '%s' contains a conditional " + \
                    "which is not allowed.\n") % (x,), noiselevel=-1)
                writemsg("!!! Please check ebuild(5) for full details.\n")
                self._dynamic_config._skip_restart = True

            args.append(AtomArg(arg=x, atom=atom,
                root_config=root_config))

    # Batch owner lookup for queued file paths.
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True

        for x in lookup_owners:
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(root)-1:])

        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:

            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)
            self._dynamic_config._skip_restart = True

            # Turn each owning package into a slot atom argument.
            pkg = vardb._pkg_str(cpv, None)
            atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))

    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms

        self._set_args(args)
            greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
            for atom in self._greedy_slots(arg.root_config, arg.atom):
                    AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))

        self._set_args(greedy_args)

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
            revised_greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
            for atom in self._greedy_slots(arg.root_config, arg.atom,
                blocker_lookahead=True):
                revised_greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    args.extend(self._gen_reinstall_sets())
    self._set_args(args)

    # Collect the user-facing favorites (deduplicated, internal sets
    # excluded).
    myfavorites = set(myfavorites)
        if isinstance(arg, (AtomArg, PackageArg)):
            myfavorites.add(arg.atom)
        elif isinstance(arg, SetArg):
            if not arg.internal:
                myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

        portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]

    return self._resolve(myfavorites)
def _gen_reinstall_sets(self):
    """Yield internal SetArg instances for the rebuild/reinstall atoms
    accumulated so far, grouped per (root, set name).

    NOTE(review): the atom_list/set_dict initializations and one
    keyword argument of SetArg are elided from this excerpt.
    """
    for root, atom in self._rebuild.rebuild_list:
        atom_list.append((root, '__auto_rebuild__', atom))
    for root, atom in self._rebuild.reinstall_list:
        atom_list.append((root, '__auto_reinstall__', atom))
    for root, atom in self._dynamic_config._slot_operator_replace_installed:
        atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))

    # Group the collected atoms by (root, set name).
    for root, set_name, atom in atom_list:
        set_dict.setdefault((root, set_name), []).append(atom)

    for (root, set_name), atoms in set_dict.items():
        yield SetArg(arg=(SETPREFIX + set_name),
            # Set reset_depth=False here, since we don't want these
            # special sets to interact with depth calculations (see
            # the emerge --deep=DEPTH option), though we want them
            # to behave like normal arguments in most other respects.
            pset=InternalPackageSet(initial_atoms=atoms),
            force_reinstall=True,
            root_config=self._frozen_config.roots[root])
2903 def _resolve(self, myfavorites):
2904 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2905 call self._creategraph to process theier deps and return
2907 debug = "--debug" in self._frozen_config.myopts
2908 onlydeps = "--onlydeps" in self._frozen_config.myopts
2909 myroot = self._frozen_config.target_root
2910 pkgsettings = self._frozen_config.pkgsettings[myroot]
2911 pprovideddict = pkgsettings.pprovideddict
2912 virtuals = pkgsettings.getvirtuals()
2913 args = self._dynamic_config._initial_arg_list[:]
2915 for arg in self._expand_set_args(args, add_to_digraph=True):
2916 for atom in arg.pset.getAtoms():
2917 self._spinner_update()
2918 dep = Dependency(atom=atom, onlydeps=onlydeps,
2919 root=myroot, parent=arg)
2921 pprovided = pprovideddict.get(atom.cp)
2922 if pprovided and portage.match_from_list(atom, pprovided):
2923 # A provided package has been specified on the command line.
2924 self._dynamic_config._pprovided_args.append((arg, atom))
2926 if isinstance(arg, PackageArg):
2927 if not self._add_pkg(arg.package, dep) or \
2928 not self._create_graph():
2929 if not self.need_restart():
2930 sys.stderr.write(("\n\n!!! Problem " + \
2931 "resolving dependencies for %s\n") % \
2933 return 0, myfavorites
2936 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2937 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2938 pkg, existing_node = self._select_package(
2939 myroot, atom, onlydeps=onlydeps)
2941 pprovided_match = False
2942 for virt_choice in virtuals.get(atom.cp, []):
2943 expanded_atom = portage.dep.Atom(
2944 atom.replace(atom.cp, virt_choice.cp, 1))
2945 pprovided = pprovideddict.get(expanded_atom.cp)
2947 portage.match_from_list(expanded_atom, pprovided):
2948 # A provided package has been
2949 # specified on the command line.
2950 self._dynamic_config._pprovided_args.append((arg, atom))
2951 pprovided_match = True
2956 if not (isinstance(arg, SetArg) and \
2957 arg.name in ("selected", "system", "world")):
2958 self._dynamic_config._unsatisfied_deps_for_display.append(
2959 ((myroot, atom), {"myparent" : arg}))
2960 return 0, myfavorites
2962 self._dynamic_config._missing_args.append((arg, atom))
2964 if atom.cp != pkg.cp:
2965 # For old-style virtuals, we need to repeat the
2966 # package.provided check against the selected package.
2967 expanded_atom = atom.replace(atom.cp, pkg.cp)
2968 pprovided = pprovideddict.get(pkg.cp)
2970 portage.match_from_list(expanded_atom, pprovided):
2971 # A provided package has been
2972 # specified on the command line.
2973 self._dynamic_config._pprovided_args.append((arg, atom))
2975 if pkg.installed and \
2976 "selective" not in self._dynamic_config.myparams and \
2977 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2978 pkg, modified_use=self._pkg_use_enabled(pkg)):
2979 self._dynamic_config._unsatisfied_deps_for_display.append(
2980 ((myroot, atom), {"myparent" : arg}))
2981 # Previous behavior was to bail out in this case, but
2982 # since the dep is satisfied by the installed package,
2983 # it's more friendly to continue building the graph
2984 # and just show a warning message. Therefore, only bail
2985 # out here if the atom is not from either the system or
2987 if not (isinstance(arg, SetArg) and \
2988 arg.name in ("selected", "system", "world")):
2989 return 0, myfavorites
2991 # Add the selected package to the graph as soon as possible
2992 # so that later dep_check() calls can use it as feedback
2993 # for making more consistent atom selections.
2994 if not self._add_pkg(pkg, dep):
2995 if self.need_restart():
2997 elif isinstance(arg, SetArg):
2998 writemsg(("\n\n!!! Problem resolving " + \
2999 "dependencies for %s from %s\n") % \
3000 (atom, arg.arg), noiselevel=-1)
3002 writemsg(("\n\n!!! Problem resolving " + \
3003 "dependencies for %s\n") % \
3004 (atom,), noiselevel=-1)
3005 return 0, myfavorites
3007 except SystemExit as e:
3008 raise # Needed else can't exit
3009 except Exception as e:
3010 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
3011 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
3014 # Now that the root packages have been added to the graph,
3015 # process the dependencies.
3016 if not self._create_graph():
3017 return 0, myfavorites
3021 except self._unknown_internal_error:
3022 return False, myfavorites
3024 if (self._dynamic_config._slot_collision_info and
3025 not self._accept_blocker_conflicts()) or \
3026 (self._dynamic_config._allow_backtracking and
3027 "slot conflict" in self._dynamic_config._backtrack_infos):
3028 return False, myfavorites
3030 if self._rebuild.trigger_rebuilds():
3031 backtrack_infos = self._dynamic_config._backtrack_infos
3032 config = backtrack_infos.setdefault("config", {})
3033 config["rebuild_list"] = self._rebuild.rebuild_list
3034 config["reinstall_list"] = self._rebuild.reinstall_list
3035 self._dynamic_config._need_restart = True
3036 return False, myfavorites
3038 if "config" in self._dynamic_config._backtrack_infos and \
3039 ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
3040 "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
3041 self.need_restart():
3042 return False, myfavorites
3044 if not self._dynamic_config._prune_rebuilds and \
3045 self._dynamic_config._slot_operator_replace_installed and \
3046 self._get_missed_updates():
3047 # When there are missed updates, we might have triggered
3048 # some unnecessary rebuilds (see bug #439688). So, prune
3049 # all the rebuilds and backtrack with the problematic
3050 # updates masked. The next backtrack run should pull in
3051 # any rebuilds that are really needed, and this
3052 # prune_rebuilds path should never be entered more than
3053 # once in a series of backtracking nodes (in order to
3054 # avoid a backtracking loop).
3055 backtrack_infos = self._dynamic_config._backtrack_infos
3056 config = backtrack_infos.setdefault("config", {})
3057 config["prune_rebuilds"] = True
3058 self._dynamic_config._need_restart = True
3059 return False, myfavorites
3061 if self.need_restart():
3062 # want_restart_for_use_change triggers this
3063 return False, myfavorites
3065 # Any failures except those due to autounmask *alone* should return
3066 # before this point, since the success_without_autounmask flag that's
3067 # set below is reserved for cases where there are *zero* other
3068 # problems. For reference, see backtrack_depgraph, where it skips the
3069 # get_best_run() call when success_without_autounmask is True.
3071 digraph_nodes = self._dynamic_config.digraph.nodes
3073 if any(x in digraph_nodes for x in
3074 self._dynamic_config._needed_unstable_keywords) or \
3075 any(x in digraph_nodes for x in
3076 self._dynamic_config._needed_p_mask_changes) or \
3077 any(x in digraph_nodes for x in
3078 self._dynamic_config._needed_use_config_changes) or \
3079 any(x in digraph_nodes for x in
3080 self._dynamic_config._needed_license_changes) :
3081 #We failed if the user needs to change the configuration
3082 self._dynamic_config._success_without_autounmask = True
3083 return False, myfavorites
3085 # We're true here unless we are missing binaries.
3086 return (True, myfavorites)
# Rebuild the per-root argument package sets from the given emerge
# arguments.  Idempotent by design: all per-root set state is cleared
# up front, and the package selection cache is invalidated at the end.
# NOTE(review): this excerpt is line-sampled; the initialisation of
# set_atoms/non_set_atoms dicts and some else/if branches are not
# visible here — confirm against the full file.
3088 def _set_args(self, args):
3090 Create the "__non_set_args__" package set from atoms and packages given as
3091 arguments. This method can be called multiple times if necessary.
3092 The package selection cache is automatically invalidated, since
3093 arguments influence package selections.
# Phase 1: reset per-root state so repeated calls start clean.
3098 for root in self._dynamic_config.sets:
3099 depgraph_sets = self._dynamic_config.sets[root]
3100 depgraph_sets.sets.setdefault('__non_set_args__',
3101 InternalPackageSet(allow_repo=True)).clear()
3102 depgraph_sets.atoms.clear()
3103 depgraph_sets.atom_arg_map.clear()
3104 set_atoms[root] = []
3105 non_set_atoms[root] = []
# Phase 2: expand args and bucket their atoms by root, recording which
# arg(s) each (atom, root) key came from in atom_arg_map.
3107 # We don't add set args to the digraph here since that
3108 # happens at a later stage and we don't want to make
3109 # any state changes here that aren't reversed by
3110 # another call to this method.
3111 for arg in self._expand_set_args(args, add_to_digraph=False):
3112 atom_arg_map = self._dynamic_config.sets[
3113 arg.root_config.root].atom_arg_map
3114 if isinstance(arg, SetArg):
3115 atom_group = set_atoms[arg.root_config.root]
3117 atom_group = non_set_atoms[arg.root_config.root]
3119 for atom in arg.pset.getAtoms():
3120 atom_group.append(atom)
3121 atom_key = (atom, arg.root_config.root)
3122 refs = atom_arg_map.get(atom_key)
3125 atom_arg_map[atom_key] = refs
# Phase 3: publish the collected atoms into each root's depgraph sets;
# only non-set atoms go into the "__non_set_args__" set.
3129 for root in self._dynamic_config.sets:
3130 depgraph_sets = self._dynamic_config.sets[root]
3131 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
3132 non_set_atoms.get(root, [])))
3133 depgraph_sets.sets['__non_set_args__'].update(
3134 non_set_atoms.get(root, []))
3136 # Invalidate the package selection cache, since
3137 # arguments influence package selections.
3138 self._dynamic_config._highest_pkg_cache.clear()
3139 for trees in self._dynamic_config._filtered_trees.values():
3140 trees["porttree"].dbapi._clear_cache()
# NOTE(review): line-sampled excerpt — the initialisation of the local
# `slots`, `greedy_pkgs`, `blockers` and `blocker_atoms` collections and
# several continue/return lines are not visible here.
3142 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
3144 Return a list of slot atoms corresponding to installed slots that
3145 differ from the slot of the highest visible match. When
3146 blocker_lookahead is True, slot atoms that would trigger a blocker
3147 conflict are automatically discarded, potentially allowing automatic
3148 uninstallation of older slots when appropriate.
# Resolve the highest visible match for the atom; installed slots are
# compared against this package's slot below.
3150 highest_pkg, in_graph = self._select_package(root_config.root, atom)
3151 if highest_pkg is None:
3153 vardb = root_config.trees["vartree"].dbapi
3155 for cpv in vardb.match(atom):
3156 # don't mix new virtuals with old virtuals
3157 pkg = vardb._pkg_str(cpv, None)
3158 if pkg.cp == highest_pkg.cp:
3161 slots.add(highest_pkg.slot)
3165 slots.remove(highest_pkg.slot)
# For each remaining installed slot, select a visible package and keep
# it only if it is a lower version of the same cp.
3168 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
3169 pkg, in_graph = self._select_package(root_config.root, slot_atom)
3170 if pkg is not None and \
3171 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
3172 greedy_pkgs.append(pkg)
3175 if not blocker_lookahead:
3176 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: collect each candidate's blocker atoms (from all
# dep keys) so conflicting slots can be discarded below.
3179 blocker_dep_keys = Package._dep_keys
3180 for pkg in greedy_pkgs + [highest_pkg]:
3181 dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
3183 selected_atoms = self._select_atoms(
3184 pkg.root, dep_str, self._pkg_use_enabled(pkg),
3185 parent=pkg, strict=True)
3186 except portage.exception.InvalidDependString:
3189 for atoms in selected_atoms.values():
3190 blocker_atoms.extend(x for x in atoms if x.blocker)
3191 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
3193 if highest_pkg not in blockers:
3196 # filter packages with invalid deps
3197 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
3199 # filter packages that conflict with highest_pkg
3200 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
3201 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
3202 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
# Pairwise conflict resolution among remaining candidates: sort highest
# first, then discard the lower version of any conflicting pair.
3207 # If two packages conflict, discard the lower version.
3208 discard_pkgs = set()
3209 greedy_pkgs.sort(reverse=True)
3210 for i in range(len(greedy_pkgs) - 1):
3211 pkg1 = greedy_pkgs[i]
3212 if pkg1 in discard_pkgs:
3214 for j in range(i + 1, len(greedy_pkgs)):
3215 pkg2 = greedy_pkgs[j]
3216 if pkg2 in discard_pkgs:
3218 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
3219 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
3221 discard_pkgs.add(pkg2)
3223 return [pkg.slot_atom for pkg in greedy_pkgs \
3224 if pkg not in discard_pkgs]
# Thin wrapper around _select_atoms_highest_available that forces the
# graph-backed trees, so atom selection favors packages already in the
# graph (or installed and not scheduled for replacement).
3226 def _select_atoms_from_graph(self, *pargs, **kwargs):
3228 Prefer atoms matching packages that have already been
3229 added to the graph or those that are installed and have
3230 not been scheduled for replacement.
# Override/insert the "trees" keyword before delegating; all other
# arguments pass through unchanged.
3232 kwargs["trees"] = self._dynamic_config._graph_trees
3233 return self._select_atoms_highest_available(*pargs,
3234 **portage._native_kwargs(kwargs))
# Evaluate a dependency string via portage.dep_check and return the
# selected atoms, optionally expanded through recursively traversed
# virtual packages (keyed by Dependency objects when parent is given).
# NOTE(review): line-sampled excerpt — the try/finally scaffolding
# around dep_check, the eapi assignment, and some else branches are not
# visible here; confirm against the full file.
3236 def _select_atoms_highest_available(self, root, depstring,
3237 myuse=None, parent=None, strict=True, trees=None, priority=None):
3238 """This will raise InvalidDependString if necessary. If trees is
3239 None then self._dynamic_config._filtered_trees is used."""
# A raw (string) depstring is reduced to token form first; USE-flag
# validity is only enforced for non-installed parents.
3241 if not isinstance(depstring, list):
3243 is_valid_flag = None
3244 if parent is not None:
3246 if not parent.installed:
3247 is_valid_flag = parent.iuse.is_valid_flag
3248 depstring = portage.dep.use_reduce(depstring,
3249 uselist=myuse, opconvert=True, token_class=Atom,
3250 is_valid_flag=is_valid_flag, eapi=eapi)
3252 if (self._dynamic_config.myparams.get(
3253 "ignore_built_slot_operator_deps", "n") == "y" and
3254 parent and parent.built):
3255 ignore_built_slot_operator_deps(depstring)
3257 pkgsettings = self._frozen_config.pkgsettings[root]
3259 trees = self._dynamic_config._filtered_trees
3260 mytrees = trees[root]
3261 atom_graph = digraph()
3263 # Temporarily disable autounmask so that || preferences
3264 # account for masking and USE settings.
3265 _autounmask_backup = self._dynamic_config._autounmask
3266 self._dynamic_config._autounmask = False
3267 # backup state for restoration, in case of recursive
3268 # calls to this method
3269 backup_state = mytrees.copy()
3271 # clear state from previous call, in case this
3272 # call is recursive (we have a backup, that we
3273 # will use to restore it later)
3274 mytrees.pop("pkg_use_enabled", None)
3275 mytrees.pop("parent", None)
3276 mytrees.pop("atom_graph", None)
3277 mytrees.pop("priority", None)
# dep_check communicates with the depgraph through these mytrees keys;
# they are removed again (and backup_state restored) after the call.
3279 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
3280 if parent is not None:
3281 mytrees["parent"] = parent
3282 mytrees["atom_graph"] = atom_graph
3283 if priority is not None:
3284 mytrees["priority"] = priority
3286 mycheck = portage.dep_check(depstring, None,
3287 pkgsettings, myuse=myuse,
3288 myroot=root, trees=trees)
# Restore all temporary state regardless of dep_check outcome.
3291 self._dynamic_config._autounmask = _autounmask_backup
3292 mytrees.pop("pkg_use_enabled", None)
3293 mytrees.pop("parent", None)
3294 mytrees.pop("atom_graph", None)
3295 mytrees.pop("priority", None)
3296 mytrees.update(backup_state)
3298 raise portage.exception.InvalidDependString(mycheck[1])
3300 selected_atoms = mycheck[1]
3301 elif parent not in atom_graph:
3302 selected_atoms = {parent : mycheck[1]}
3304 # Recursively traversed virtual dependencies, and their
3305 # direct dependencies, are considered to have the same
3306 # depth as direct dependencies.
3307 if parent.depth is None:
3310 virt_depth = parent.depth + 1
# Depth-first walk of atom_graph starting at parent; atoms chosen by
# dep_check are identified by object id (chosen_atom_ids) so || choices
# are honored while grouping child atoms under Dependency keys.
3311 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
3312 selected_atoms = OrderedDict()
3313 node_stack = [(parent, None, None)]
3314 traversed_nodes = set()
3316 node, node_parent, parent_atom = node_stack.pop()
3317 traversed_nodes.add(node)
3321 if node_parent is parent:
3322 if priority is None:
3323 node_priority = None
3325 node_priority = priority.copy()
3327 # virtuals only have runtime deps
3328 node_priority = self._priority(runtime=True)
3330 k = Dependency(atom=parent_atom,
3331 blocker=parent_atom.blocker, child=node,
3332 depth=virt_depth, parent=node_parent,
3333 priority=node_priority, root=node.root)
3336 selected_atoms[k] = child_atoms
3337 for atom_node in atom_graph.child_nodes(node):
3338 child_atom = atom_node[0]
3339 if id(child_atom) not in chosen_atom_ids:
3341 child_atoms.append(child_atom)
3342 for child_node in atom_graph.child_nodes(atom_node):
3343 if child_node in traversed_nodes:
3345 if not portage.match_from_list(
3346 child_atom, [child_node]):
3347 # Typically this means that the atom
3348 # specifies USE deps that are unsatisfied
3349 # by the selected package. The caller will
3350 # record this as an unsatisfied dependency
3353 node_stack.append((child_node, node, child_atom))
3355 return selected_atoms
# Expand a virtual atom into the real atoms of its graph-matched
# virtual package(s), by evaluating the matched package's RDEPEND
# against the graph trees.
# NOTE(review): line-sampled excerpt — the guard returns/yields and the
# loop over matched packages are not visible here; this appears to be a
# generator (atoms are produced per rdepend value), confirm against the
# full file.
3357 def _expand_virt_from_graph(self, root, atom):
3358 if not isinstance(atom, Atom):
3360 graphdb = self._dynamic_config.mydbapi[root]
3361 match = graphdb.match_pkgs(atom)
# Only packages in the virtual/ category are expanded.
3366 if not pkg.cpv.startswith("virtual/"):
3370 rdepend = self._select_atoms_from_graph(
3371 pkg.root, pkg._metadata.get("RDEPEND", ""),
3372 myuse=self._pkg_use_enabled(pkg),
3373 parent=pkg, strict=False)
3374 except InvalidDependString as e:
3375 writemsg_level("!!! Invalid RDEPEND in " + \
3376 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3377 (pkg.root, pkg.cpv, e),
3378 noiselevel=-1, level=logging.ERROR)
3382 for atoms in rdepend.values():
3384 if hasattr(atom, "_orig_atom"):
3385 # Ignore virtual atoms since we're only
3386 # interested in expanding the real atoms.
# NOTE(review): line-sampled excerpt — the True/False return statements
# and part of the atom loop are not visible here.
3390 def _virt_deps_visible(self, pkg, ignore_use=False):
3392 Assumes pkg is a virtual package. Traverses virtual deps recursively
3393 and returns True if all deps are visible, False otherwise. This is
3394 useful for checking if it will be necessary to expand virtual slots,
3395 for cases like bug #382557.
# Evaluate the virtual's RDEPEND with its enabled USE; runtime priority
# marks these as runtime deps.
3398 rdepend = self._select_atoms(
3399 pkg.root, pkg._metadata.get("RDEPEND", ""),
3400 myuse=self._pkg_use_enabled(pkg),
3401 parent=pkg, priority=self._priority(runtime=True))
3402 except InvalidDependString as e:
# An invalid RDEPEND is only reported for installed packages here;
# a non-installed package with invalid deps takes the early branch.
3403 if not pkg.installed:
3405 writemsg_level("!!! Invalid RDEPEND in " + \
3406 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3407 (pkg.root, pkg.cpv, e),
3408 noiselevel=-1, level=logging.ERROR)
# Visibility check of each selected dep; USE deps are stripped when
# ignore_use is set (see atom.without_use use below).
3411 for atoms in rdepend.values():
3414 atom = atom.without_use
3415 pkg, existing = self._select_package(
3417 if pkg is None or not self._pkg_visibility_check(pkg):
# Walk parent edges of the depgraph upward from start_node, producing a
# human-readable dependency chain for display (e.g. in autounmask
# comments).  NOTE(review): line-sampled excerpt — several assignments
# (dep_chain/usedep/child/node initialisation, break/continue lines)
# are not visible here.
3422 def _get_dep_chain(self, start_node, target_atom=None,
3423 unsatisfied_dependency=False):
3425 Returns a list of (atom, node_type) pairs that represent a dep chain.
3426 If target_atom is None, the first package shown is pkg's parent.
3427 If target_atom is not None the first package shown is pkg.
3428 If unsatisfied_dependency is True, the first parent selected is one
3429 whose dependency is not satisfied by 'pkg'. This is needed for USE
changes.
3430 (Does not support target_atom.)
3432 traversed_nodes = set()
3436 all_parents = self._dynamic_config._parent_atoms
3437 graph = self._dynamic_config.digraph
3438 verbose_main_repo_display = "--verbose-main-repo-display" in \
3439 self._frozen_config.myopts
# Local helper: render "cpv" plus "::repo" suffix when the repo is not
# the main repo (or verbose display was requested).
3441 def format_pkg(pkg):
3442 pkg_name = "%s" % (pkg.cpv,)
3443 if verbose_main_repo_display or pkg.repo != \
3444 pkg.root_config.settings.repositories.mainRepo().name:
3445 pkg_name += _repo_separator + pkg.repo
# When a target_atom is given, the start package itself is shown first,
# annotated with the USE flags that affect the target atom.
3448 if target_atom is not None and isinstance(node, Package):
3449 affecting_use = set()
3450 for dep_str in Package._dep_keys:
3452 affecting_use.update(extract_affecting_use(
3453 node._metadata[dep_str], target_atom,
3455 except InvalidDependString:
3456 if not node.installed:
3458 affecting_use.difference_update(node.use.mask, node.use.force)
3459 pkg_name = format_pkg(node)
3463 for flag in affecting_use:
3464 if flag in self._pkg_use_enabled(node):
3467 usedep.append("-"+flag)
3468 pkg_name += "[%s]" % ",".join(usedep)
3470 dep_chain.append((pkg_name, node.type_name))
3473 # To build a dep chain for the given package we take
3474 # "random" parents from the digraph, except for the
3475 # first package, because we want a parent that forced
3476 # the corresponding change (i.e '>=foo-2', instead 'foo').
3478 traversed_nodes.add(start_node)
# Group parent atoms by atom so the best (most specific) match for the
# start node's cpv can be chosen below.
3480 start_node_parent_atoms = {}
3481 for ppkg, patom in all_parents.get(node, []):
3482 # Get a list of suitable atoms. For use deps
3483 # (aka unsatisfied_dependency is True) we
3484 # need that the start_node doesn't match the atom.
3485 if not unsatisfied_dependency or \
3486 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
3487 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
3489 if start_node_parent_atoms:
3490 # If there are parents in all_parents then use one of them.
3491 # If not, then this package got pulled in by an Arg and
3492 # will be correctly handled by the code that handles later
3493 # packages in the dep chain.
3494 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
3497 for ppkg in start_node_parent_atoms[best_match]:
3499 if ppkg in self._dynamic_config._initial_arg_list:
3500 # Stop if reached the top level of the dep chain.
# Main upward walk: format each node, then pick the next parent.
3503 while node is not None:
3504 traversed_nodes.add(node)
3506 if node not in graph:
3507 # The parent is not in the graph due to backtracking.
3510 elif isinstance(node, DependencyArg):
3511 if graph.parent_nodes(node):
3514 node_type = "argument"
3515 dep_chain.append(("%s" % (node,), node_type))
3517 elif node is not start_node:
3518 for ppkg, patom in all_parents[child]:
3520 if child is start_node and unsatisfied_dependency and \
3521 InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
3522 # This atom is satisfied by child, there must be another atom.
3524 atom = patom.unevaluated_atom
# Determine which dep strings carried the edge, so the affecting USE
# flags can be extracted from the right metadata keys.
3528 priorities = graph.nodes[node][0].get(child)
3529 if priorities is None:
3530 # This edge comes from _parent_atoms and was not added to
3531 # the graph, and _parent_atoms does not contain priorities.
3532 for k in Package._dep_keys:
3533 dep_strings.add(node._metadata[k])
3535 for priority in priorities:
3536 if priority.buildtime:
3537 for k in Package._buildtime_keys:
3538 dep_strings.add(node._metadata[k])
3539 if priority.runtime:
3540 dep_strings.add(node._metadata["RDEPEND"])
3541 if priority.runtime_post:
3542 dep_strings.add(node._metadata["PDEPEND"])
3544 affecting_use = set()
3545 for dep_str in dep_strings:
3547 affecting_use.update(extract_affecting_use(
3548 dep_str, atom, eapi=node.eapi))
3549 except InvalidDependString:
3550 if not node.installed:
3553 #Don't show flags as 'affecting' if the user can't change them,
3554 affecting_use.difference_update(node.use.mask, \
3557 pkg_name = format_pkg(node)
3560 for flag in affecting_use:
3561 if flag in self._pkg_use_enabled(node):
3564 usedep.append("-"+flag)
3565 pkg_name += "[%s]" % ",".join(usedep)
3567 dep_chain.append((pkg_name, node.type_name))
3569 # When traversing to parents, prefer arguments over packages
3570 # since arguments are root nodes. Never traverse the same
3571 # package twice, in order to prevent an infinite loop.
3573 selected_parent = None
3576 parent_unsatisfied = None
3578 for parent in self._dynamic_config.digraph.parent_nodes(node):
3579 if parent in traversed_nodes:
3581 if isinstance(parent, DependencyArg):
3584 if isinstance(parent, Package) and \
3585 parent.operation == "merge":
3586 parent_merge = parent
3587 if unsatisfied_dependency and node is start_node:
3588 # Make sure that pkg doesn't satisfy parent's dependency.
3589 # This ensures that we select the correct parent for use
3591 for ppkg, atom in all_parents[start_node]:
3593 atom_set = InternalPackageSet(initial_atoms=(atom,))
3594 if not atom_set.findAtomForPackage(start_node):
3595 parent_unsatisfied = parent
3598 selected_parent = parent
# Parent preference order: unsatisfied parent (USE-change case) >
# parent scheduled for merge (bug #354747) > argument node > other.
3600 if parent_unsatisfied is not None:
3601 selected_parent = parent_unsatisfied
3602 elif parent_merge is not None:
3603 # Prefer parent in the merge list (bug #354747).
3604 selected_parent = parent_merge
3605 elif parent_arg is not None:
3606 if self._dynamic_config.digraph.parent_nodes(parent_arg):
3607 selected_parent = parent_arg
3609 dep_chain.append(("%s" % (parent_arg,), "argument"))
3610 selected_parent = None
3612 node = selected_parent
# Render the dep chain for pkg as a "#"-prefixed comment block, one
# "required by ..." line per chain entry (argument nodes are labelled).
# NOTE(review): line-sampled excerpt — the display_list initialisation
# and the return statement are not visible here.
3615 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
3616 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
3618 for node, node_type in dep_chain:
3619 if node_type == "argument":
3620 display_list.append("required by %s (argument)" % node)
3622 display_list.append("required by %s" % node)
# Join entries into a single comment block terminated by a newline.
3624 msg = "# " + "\n# ".join(display_list) + "\n"
3628 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
3629 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
3631 When check_backtrack=True, no output is produced and
3632 the method either returns or raises _backtrack_mask if
3633 a matching package has been masked by backtracking.
3635 backtrack_mask = False
3636 autounmask_broke_use_dep = False
3637 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
3639 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
3641 xinfo = '"%s"' % atom.unevaluated_atom
3644 if isinstance(myparent, AtomArg):
3645 xinfo = '"%s"' % (myparent,)
3646 # Discard null/ from failed cpv_expand category expansion.
3647 xinfo = xinfo.replace("null/", "")
3648 if root != self._frozen_config._running_root.root:
3649 xinfo = "%s for %s" % (xinfo, root)
3650 masked_packages = []
3652 missing_use_adjustable = set()
3653 required_use_unsatisfied = []
3654 masked_pkg_instances = set()
3655 have_eapi_mask = False
3656 pkgsettings = self._frozen_config.pkgsettings[root]
3657 root_config = self._frozen_config.roots[root]
3658 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3659 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3660 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
3661 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3662 for db, pkg_type, built, installed, db_keys in dbs:
3665 if hasattr(db, "xmatch"):
3666 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
3668 cpv_list = db.match(atom.without_use)
3670 if atom.repo is None and hasattr(db, "getRepositories"):
3671 repo_list = db.getRepositories()
3673 repo_list = [atom.repo]
3677 for cpv in cpv_list:
3678 for repo in repo_list:
3679 if not db.cpv_exists(cpv, myrepo=repo):
3682 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
3683 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
3684 if metadata is not None and \
3685 portage.eapi_is_supported(metadata["EAPI"]):
3687 repo = metadata.get('repository')
3688 pkg = self._pkg(cpv, pkg_type, root_config,
3689 installed=installed, myrepo=repo)
3690 # pkg._metadata contains calculated USE for ebuilds,
3691 # required later for getMissingLicenses.
3692 metadata = pkg._metadata
3694 # Avoid doing any operations with packages that
3695 # have invalid metadata. It would be unsafe at
3696 # least because it could trigger unhandled
3697 # exceptions in places like check_required_use().
3698 masked_packages.append(
3699 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3701 if not atom_set.findAtomForPackage(pkg,
3702 modified_use=self._pkg_use_enabled(pkg)):
3704 if pkg in self._dynamic_config._runtime_pkg_mask:
3705 backtrack_reasons = \
3706 self._dynamic_config._runtime_pkg_mask[pkg]
3707 mreasons.append('backtracking: %s' % \
3708 ', '.join(sorted(backtrack_reasons)))
3709 backtrack_mask = True
3710 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3711 modified_use=self._pkg_use_enabled(pkg)):
3712 mreasons = ["exclude option"]
3714 masked_pkg_instances.add(pkg)
3715 if atom.unevaluated_atom.use:
3717 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3718 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3719 missing_use.append(pkg)
3720 if atom_set_with_use.findAtomForPackage(pkg):
3721 autounmask_broke_use_dep = True
3725 writemsg("violated_conditionals raised " + \
3726 "InvalidAtom: '%s' parent: %s" % \
3727 (atom, myparent), noiselevel=-1)
3729 if not mreasons and \
3731 pkg._metadata.get("REQUIRED_USE") and \
3732 eapi_has_required_use(pkg.eapi):
3733 if not check_required_use(
3734 pkg._metadata["REQUIRED_USE"],
3735 self._pkg_use_enabled(pkg),
3736 pkg.iuse.is_valid_flag,
3738 required_use_unsatisfied.append(pkg)
3740 root_slot = (pkg.root, pkg.slot_atom)
3741 if pkg.built and root_slot in self._rebuild.rebuild_list:
3742 mreasons = ["need to rebuild from source"]
3743 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3744 mreasons = ["need to rebuild from source"]
3745 elif pkg.built and not mreasons:
3746 mreasons = ["use flag configuration mismatch"]
3747 masked_packages.append(
3748 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3752 raise self._backtrack_mask()
3756 if check_autounmask_breakage:
3757 if autounmask_broke_use_dep:
3758 raise self._autounmask_breakage()
3762 missing_use_reasons = []
3763 missing_iuse_reasons = []
3764 for pkg in missing_use:
3765 use = self._pkg_use_enabled(pkg)
3767 #Use the unevaluated atom here, because some flags might have gone
3768 #lost during evaluation.
3769 required_flags = atom.unevaluated_atom.use.required
3770 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3774 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3775 missing_iuse_reasons.append((pkg, mreasons))
3777 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3778 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3780 untouchable_flags = \
3781 frozenset(chain(pkg.use.mask, pkg.use.force))
3782 if any(x in untouchable_flags for x in
3783 chain(need_enable, need_disable)):
3786 missing_use_adjustable.add(pkg)
3787 required_use = pkg._metadata.get("REQUIRED_USE")
3788 required_use_warning = ""
3790 old_use = self._pkg_use_enabled(pkg)
3791 new_use = set(self._pkg_use_enabled(pkg))
3792 for flag in need_enable:
3794 for flag in need_disable:
3795 new_use.discard(flag)
3796 if check_required_use(required_use, old_use,
3797 pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
3798 and not check_required_use(required_use, new_use,
3799 pkg.iuse.is_valid_flag, eapi=pkg.eapi):
3800 required_use_warning = ", this change violates use flag constraints " + \
3801 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3803 if need_enable or need_disable:
3805 changes.extend(colorize("red", "+" + x) \
3806 for x in need_enable)
3807 changes.extend(colorize("blue", "-" + x) \
3808 for x in need_disable)
3809 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3810 missing_use_reasons.append((pkg, mreasons))
3812 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3813 # Lets see if the violated use deps are conditional.
3814 # If so, suggest to change them on the parent.
3816 # If the child package is masked then a change to
3817 # parent USE is not a valid solution (a normal mask
3818 # message should be displayed instead).
3819 if pkg in masked_pkg_instances:
3823 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3824 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3825 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3826 #all violated use deps are conditional
3828 conditional = violated_atom.use.conditional
3829 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3830 conditional.enabled, conditional.disabled))
3832 untouchable_flags = \
3833 frozenset(chain(myparent.use.mask, myparent.use.force))
3834 if any(x in untouchable_flags for x in involved_flags):
3837 required_use = myparent._metadata.get("REQUIRED_USE")
3838 required_use_warning = ""
3840 old_use = self._pkg_use_enabled(myparent)
3841 new_use = set(self._pkg_use_enabled(myparent))
3842 for flag in involved_flags:
3844 new_use.discard(flag)
3847 if check_required_use(required_use, old_use,
3848 myparent.iuse.is_valid_flag,
3849 eapi=myparent.eapi) and \
3850 not check_required_use(required_use, new_use,
3851 myparent.iuse.is_valid_flag,
3852 eapi=myparent.eapi):
3853 required_use_warning = ", this change violates use flag constraints " + \
3854 "defined by %s: '%s'" % (myparent.cpv, \
3855 human_readable_required_use(required_use))
3857 for flag in involved_flags:
3858 if flag in self._pkg_use_enabled(myparent):
3859 changes.append(colorize("blue", "-" + flag))
3861 changes.append(colorize("red", "+" + flag))
3862 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3863 if (myparent, mreasons) not in missing_use_reasons:
3864 missing_use_reasons.append((myparent, mreasons))
3866 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3867 in missing_use_reasons if pkg not in masked_pkg_instances]
3869 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3870 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3872 show_missing_use = False
3873 if unmasked_use_reasons:
3874 # Only show the latest version.
3875 show_missing_use = []
3877 parent_reason = None
3878 for pkg, mreasons in unmasked_use_reasons:
3880 if parent_reason is None:
3881 #This happens if a use change on the parent
3882 #leads to a satisfied conditional use dep.
3883 parent_reason = (pkg, mreasons)
3884 elif pkg_reason is None:
3885 #Don't rely on the first pkg in unmasked_use_reasons,
3886 #being the highest version of the dependency.
3887 pkg_reason = (pkg, mreasons)
3889 show_missing_use.append(pkg_reason)
3891 show_missing_use.append(parent_reason)
3893 elif unmasked_iuse_reasons:
3894 masked_with_iuse = False
3895 for pkg in masked_pkg_instances:
3896 #Use atom.unevaluated here, because some flags might have gone
3897 #lost during evaluation.
3898 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3899 # Package(s) with required IUSE are masked,
3900 # so display a normal masking message.
3901 masked_with_iuse = True
3903 if not masked_with_iuse:
3904 show_missing_use = unmasked_iuse_reasons
3906 if required_use_unsatisfied:
3907 # If there's a higher unmasked version in missing_use_adjustable
3908 # then we want to show that instead.
3909 for pkg in missing_use_adjustable:
3910 if pkg not in masked_pkg_instances and \
3911 pkg > required_use_unsatisfied[0]:
3912 required_use_unsatisfied = False
3917 if show_req_use is None and required_use_unsatisfied:
3918 # We have an unmasked package that only requires USE adjustment
3919 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3920 # that the user wants the latest version, so only the first
3921 # instance is displayed.
3922 show_req_use = required_use_unsatisfied[0]
3924 if show_req_use is not None:
3927 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3928 writemsg("\n!!! " + \
3929 colorize("BAD", "The ebuild selected to satisfy ") + \
3930 colorize("INFORM", xinfo) + \
3931 colorize("BAD", " has unmet requirements.") + "\n",
3933 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3934 writemsg("- %s %s\n" % (output_cpv, use_display),
3936 writemsg("\n The following REQUIRED_USE flag constraints " + \
3937 "are unsatisfied:\n", noiselevel=-1)
3938 reduced_noise = check_required_use(
3939 pkg._metadata["REQUIRED_USE"],
3940 self._pkg_use_enabled(pkg),
3941 pkg.iuse.is_valid_flag,
3942 eapi=pkg.eapi).tounicode()
3943 writemsg(" %s\n" % \
3944 human_readable_required_use(reduced_noise),
3946 normalized_required_use = \
3947 " ".join(pkg._metadata["REQUIRED_USE"].split())
3948 if reduced_noise != normalized_required_use:
3949 writemsg("\n The above constraints " + \
3950 "are a subset of the following complete expression:\n",
3952 writemsg(" %s\n" % \
3953 human_readable_required_use(normalized_required_use),
3955 writemsg("\n", noiselevel=-1)
3957 elif show_missing_use:
3958 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3959 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3960 for pkg, mreasons in show_missing_use:
3961 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3963 elif masked_packages:
3964 writemsg("\n!!! " + \
3965 colorize("BAD", "All ebuilds that could satisfy ") + \
3966 colorize("INFORM", xinfo) + \
3967 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3968 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3969 have_eapi_mask = show_masked_packages(masked_packages)
3971 writemsg("\n", noiselevel=-1)
3972 msg = ("The current version of portage supports " + \
3973 "EAPI '%s'. You must upgrade to a newer version" + \
3974 " of portage before EAPI masked packages can" + \
3975 " be installed.") % portage.const.EAPI
3976 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3977 writemsg("\n", noiselevel=-1)
3981 if not atom.cp.startswith("null/"):
3982 for pkg in self._iter_match_pkgs_any(
3983 root_config, Atom(atom.cp)):
3987 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3988 if isinstance(myparent, AtomArg) and \
3990 self._frozen_config.myopts.get(
3991 "--misspell-suggestions", "y") != "n":
3993 writemsg("\nemerge: searching for similar names..."
3997 if "--usepkgonly" not in self._frozen_config.myopts:
3999 if "--usepkg" in self._frozen_config.myopts:
4002 matches = similar_name_search(dbs, atom)
4004 if len(matches) == 1:
4005 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
4007 elif len(matches) > 1:
4009 "\nemerge: Maybe you meant any of these: %s?\n" % \
4010 (", ".join(matches),), noiselevel=-1)
4012 # Generally, this would only happen if
4013 # all dbapis are empty.
4014 writemsg(" nothing similar found.\n"
4017 if not isinstance(myparent, AtomArg):
4018 # It's redundant to show parent for AtomArg since
4019 # it's the same as 'xinfo' displayed above.
4020 dep_chain = self._get_dep_chain(myparent, atom)
4021 for node, node_type in dep_chain:
4022 msg.append('(dependency required by "%s" [%s])' % \
4023 (colorize('INFORM', "%s" % (node)), node_type))
4026 writemsg("\n".join(msg), noiselevel=-1)
4027 writemsg("\n", noiselevel=-1)
4031 writemsg("\n", noiselevel=-1)
# Yield packages matching `atom` from every database (ebuild, binary,
# installed) configured for this root, delegating to _iter_match_pkgs
# once per db entry in _filtered_trees. NOTE(review): this excerpt
# elides interior lines (the per-package yield is not visible here) --
# confirm against the complete file.
4033 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
4034 for db, pkg_type, built, installed, db_keys in \
4035 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
4036 for pkg in self._iter_match_pkgs(root_config,
4037 pkg_type, atom, onlydeps=onlydeps):
# Low-level matcher over a single db type. Expands the atom against the
# db (dep_expand), pre-filters candidate cpvs with match_from_list, and
# constructs Package instances per repo, re-checking the full atom (incl.
# repo/metadata parts) via atom_set.findAtomForPackage. A trailing
# fallback treats slot deps on installed packages as satisfied when no
# configured db can provide the requested slot (USE=multislot
# workaround, see comments below). NOTE(review): interior lines
# (yields, continue statements, try bodies) are elided in this excerpt.
4040 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
4042 Iterate over Package instances of pkg_type matching the given atom.
4043 This does not check visibility and it also does not match USE for
4044 unbuilt ebuilds since USE are lazily calculated after visibility
4045 checks (to avoid the expense when possible).
4048 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
4049 atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
4050 cp_list = db.cp_list(atom_exp.cp)
4051 matched_something = False
4052 installed = pkg_type == 'installed'
# Full atom (with USE/slot/repo parts) for the per-package re-check below.
4055 atom_set = InternalPackageSet(initial_atoms=(atom,),
4057 if atom.repo is None and hasattr(db, "getRepositories"):
4058 repo_list = db.getRepositories()
4060 repo_list = [atom.repo]
4065 # Call match_from_list on one cpv at a time, in order
4066 # to avoid unnecessary match_from_list comparisons on
4067 # versions that are never yielded from this method.
4068 if not match_from_list(atom_exp, [cpv]):
4070 for repo in repo_list:
4073 pkg = self._pkg(cpv, pkg_type, root_config,
4074 installed=installed, onlydeps=onlydeps, myrepo=repo)
4075 except portage.exception.PackageNotFound:
4078 # A cpv can be returned from dbapi.match() as an
4079 # old-style virtual match even in cases when the
4080 # package does not actually PROVIDE the virtual.
4081 # Filter out any such false matches here.
4083 # Make sure that cpv from the current repo satisfies the atom.
4084 # This might not be the case if there are several repos with
4085 # the same cpv, but different metadata keys, like SLOT.
4086 # Also, parts of the match that require metadata access
4087 # are deferred until we have cached the metadata in a
4089 if not atom_set.findAtomForPackage(pkg,
4090 modified_use=self._pkg_use_enabled(pkg)):
4092 matched_something = True
4095 # USE=multislot can make an installed package appear as if
4096 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
4097 # won't do any good as long as USE=multislot is enabled since
4098 # the newly built package still won't have the expected slot.
4099 # Therefore, assume that such SLOT dependencies are already
4100 # satisfied rather than forcing a rebuild.
4101 if not matched_something and installed and \
4102 atom.slot is not None and not atom.slot_operator_built:
4104 if "remove" in self._dynamic_config.myparams:
4105 # We need to search the portdbapi, which is not in our
4106 # normal dbs list, in order to find the real SLOT.
4107 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
4108 db_keys = list(portdb._aux_cache_keys)
4109 dbs = [(portdb, "ebuild", False, False, db_keys)]
4111 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
4113 cp_list = db.cp_list(atom_exp.cp)
# Slotless variants of the atom for the "is this slot available
# anywhere?" probe below.
4115 atom_set = InternalPackageSet(
4116 initial_atoms=(atom.without_slot,), allow_repo=True)
4117 atom_exp_without_slot = atom_exp.without_slot
4120 if not match_from_list(atom_exp_without_slot, [cpv]):
4122 slot_available = False
4123 for other_db, other_type, other_built, \
4124 other_installed, other_keys in dbs:
4126 if portage.dep._match_slot(atom,
4127 other_db._pkg_str(_unicode(cpv), None)):
4128 slot_available = True
4130 except (KeyError, InvalidData):
4132 if not slot_available:
4134 inst_pkg = self._pkg(cpv, "installed",
4135 root_config, installed=installed, myrepo=atom.repo)
4136 # Remove the slot from the atom and verify that
4137 # the package matches the resulting atom.
4138 if atom_set.findAtomForPackage(inst_pkg):
# Memoized wrapper around _select_pkg_highest_available_imp. Results
# are cached per (root, atom, unevaluated atom, onlydeps, autounmask
# state); on a miss the selected package, when visible and not an
# installed package carrying masks, is also injected into
# _visible_pkgs for this root. NOTE(review): the cache-hit early
# return between the visible lines is elided in this excerpt.
4142 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
4143 cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
4144 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
4147 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
4148 self._dynamic_config._highest_pkg_cache[cache_key] = ret
4151 if self._pkg_visibility_check(pkg) and \
4152 not (pkg.installed and pkg.masks):
4153 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# Policy check for keeping an already-installed package: packages in
# excluded_pkgs are kept; an argument atom with force_reinstall causes
# replacement; otherwise the "selective" dynamic-config param decides.
# NOTE(review): the return statements between the visible conditions
# are elided in this excerpt -- confirm against the complete file.
4156 def _want_installed_pkg(self, pkg):
4158 Given an installed package returned from select_pkg, return
4159 True if the user has not explicitly requested for this package
4160 to be replaced (typically via an atom on the command line).
4162 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
4163 modified_use=self._pkg_use_enabled(pkg)):
4168 for arg, atom in self._iter_atoms_for_pkg(pkg):
4169 if arg.force_reinstall:
# InvalidDependString here means the package is masked by corruption,
# so argument iteration is abandoned for it.
4171 except InvalidDependString:
4174 if "selective" in self._dynamic_config.myparams:
# Return whether an ebuild equivalent to `pkg` (same cpv) is visible.
# First attempts a direct self._pkg lookup in pkg's own repo; on
# PackageNotFound, falls back to scanning all ebuild repos for "=cpv"
# matches, applying _pkg_visibility_check at the given autounmask
# level. NOTE(review): the try statement and early returns are elided
# in this excerpt.
4179 def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
4182 pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
4183 except portage.exception.PackageNotFound:
4184 pkg_eb_visible = False
4185 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
4186 "ebuild", Atom("=%s" % (pkg.cpv,))):
4187 if self._pkg_visibility_check(pkg_eb, autounmask_level):
4188 pkg_eb_visible = True
4190 if not pkg_eb_visible:
4193 if not self._pkg_visibility_check(pkg_eb, autounmask_level):
# True when the installed instance of pkg.cpv has the same BUILD_TIME
# as this package, i.e. the candidate is equivalent to what is already
# installed. PackageNotFound means there is no installed instance.
# NOTE(review): the guard/except-body lines between the visible lines
# are elided in this excerpt.
4198 def _equiv_binary_installed(self, pkg):
4199 build_time = pkg.build_time
4204 inst_pkg = self._pkg(pkg.cpv, "installed",
4205 pkg.root_config, installed=True)
4206 except PackageNotFound:
4209 return build_time == inst_pkg.build_time
# Plain record of which kinds of autounmask changes are permitted at a
# given escalation step (produced by _autounmask_levels). __slots__
# keeps instances small; every flag defaults to False.
# NOTE(review): the `def __init__(self):` line itself is elided in
# this excerpt, between the __slots__ tuple and the assignments.
4211 class _AutounmaskLevel(object):
4212 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
4213 "allow_missing_keywords", "allow_unmasks")
4216 self.allow_use_changes = False
4217 self.allow_license_changes = False
4218 self.allow_unstable_keywords = False
4219 self.allow_missing_keywords = False
4220 self.allow_unmasks = False
# Generator of _AutounmaskLevel steps, least invasive first: USE
# changes alone, then +license, then ~arch keywords + license, then
# missing keywords and/or package.mask removal (the latter two are
# suppressed by --autounmask-keep-masks). Note that a single
# _AutounmaskLevel instance is mutated and re-yielded on every step,
# so consumers must not retain references across iterations.
4222 def _autounmask_levels(self):
4224 Iterate over the different allowed things to unmask.
4228 2. USE + ~arch + license
4229 3. USE + ~arch + license + missing keywords
4230 4. USE + ~arch + license + masks
4231 5. USE + ~arch + license + missing keywords + masks
4234 * Do least invasive changes first.
4235 * Try unmasking alone before unmasking + missing keywords
4236 to avoid -9999 versions if possible
# Yield nothing unless autounmask is fully enabled.
4239 if self._dynamic_config._autounmask is not True:
4242 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
4243 autounmask_level = self._AutounmaskLevel()
4245 autounmask_level.allow_use_changes = True
4246 yield autounmask_level
4248 autounmask_level.allow_license_changes = True
4249 yield autounmask_level
4251 for only_use_changes in (False,):
4253 autounmask_level.allow_unstable_keywords = (not only_use_changes)
4254 autounmask_level.allow_license_changes = (not only_use_changes)
# The four combinations escalate masks/missing-keywords handling;
# both are skipped when masks must be kept.
4256 for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
4258 if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
4261 autounmask_level.allow_missing_keywords = missing_keyword
4262 autounmask_level.allow_unmasks = unmask
4264 yield autounmask_level
# Retry wrapper: performs an initial selection, and when the result is
# unusable (e.g. an installed package the user wants replaced), retries
# with progressively more aggressive autounmask levels. _need_restart
# is temporarily cleared and restored around the retry loop to avoid
# interference (bug #459832). Falls back to the initial selection when
# no autounmask level produces a usable result, so an installed
# package rejected above can still be used.
4267 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
4268 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
4270 default_selection = (pkg, existing)
4273 if pkg is not None and \
4275 not self._want_installed_pkg(pkg):
4278 if self._dynamic_config._autounmask is True:
4281 # Temporarily reset _need_restart state, in order to
4282 # avoid interference as reported in bug #459832.
4283 earlier_need_restart = self._dynamic_config._need_restart
4284 self._dynamic_config._need_restart = False
4286 for autounmask_level in self._autounmask_levels():
4291 self._wrapped_select_pkg_highest_available_imp(
4292 root, atom, onlydeps=onlydeps,
4293 autounmask_level=autounmask_level)
# Stop escalating as soon as a level schedules config changes.
4297 if self._dynamic_config._need_restart:
4300 if earlier_need_restart:
4301 self._dynamic_config._need_restart = True
4304 # This ensures that we can fall back to an installed package
4305 # that may have been rejected in the autounmask path above.
4306 return default_selection
4308 return pkg, existing
# Visibility check with optional autounmask side effects. Packages
# already in the depgraph are trusted as visible (when trust_graph).
# Otherwise masking reasons are classified (unstable keyword, missing
# keyword, package.mask, license); if the active autounmask level
# permits the needed changes, they are recorded in _dynamic_config and
# in _backtrack_infos["config"] so backtracking can replay them.
# NOTE(review): the return statements (True/False results) between the
# visible lines are elided in this excerpt.
4310 def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
4315 if trust_graph and pkg in self._dynamic_config.digraph:
4316 # Sometimes we need to temporarily disable
4317 # dynamic_config._autounmask, but for overall
4318 # consistency in dependency resolution, in most
4319 # cases we want to treat packages in the graph
4320 # as though they are visible.
# Without autounmask (or without a level) no mask can be lifted here.
4323 if not self._dynamic_config._autounmask or autounmask_level is None:
4326 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4327 root_config = self._frozen_config.roots[pkg.root]
4328 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
4330 masked_by_unstable_keywords = False
4331 masked_by_missing_keywords = False
4332 missing_licenses = None
4333 masked_by_something_else = False
4334 masked_by_p_mask = False
# Classify each masking reason by its unmask hint; anything without a
# recognized hint cannot be auto-unmasked.
4336 for reason in mreasons:
4337 hint = reason.unmask_hint
4340 masked_by_something_else = True
4341 elif hint.key == "unstable keyword":
4342 masked_by_unstable_keywords = True
4343 if hint.value == "**":
4344 masked_by_missing_keywords = True
4345 elif hint.key == "p_mask":
4346 masked_by_p_mask = True
4347 elif hint.key == "license":
4348 missing_licenses = hint.value
4350 masked_by_something_else = True
4352 if masked_by_something_else:
# Drop masks that earlier autounmask passes already scheduled changes for.
4355 if pkg in self._dynamic_config._needed_unstable_keywords:
4356 #If the package is already keyworded, remove the mask.
4357 masked_by_unstable_keywords = False
4358 masked_by_missing_keywords = False
4360 if pkg in self._dynamic_config._needed_p_mask_changes:
4361 #If the package is already keyworded, remove the mask.
4362 masked_by_p_mask = False
4364 if missing_licenses:
4365 #If the needed licenses are already unmasked, remove the mask.
4366 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
4368 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
4369 #Package has already been unmasked.
4372 if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
4373 (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
4374 (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
4375 (missing_licenses and not autounmask_level.allow_license_changes):
4376 #We are not allowed to do the needed changes.
# Record each permitted change both in the live config and in
# backtrack_infos so a restart can reapply it.
4379 if masked_by_unstable_keywords:
4380 self._dynamic_config._needed_unstable_keywords.add(pkg)
4381 backtrack_infos = self._dynamic_config._backtrack_infos
4382 backtrack_infos.setdefault("config", {})
4383 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
4384 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
4386 if masked_by_p_mask:
4387 self._dynamic_config._needed_p_mask_changes.add(pkg)
4388 backtrack_infos = self._dynamic_config._backtrack_infos
4389 backtrack_infos.setdefault("config", {})
4390 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
4391 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
4393 if missing_licenses:
4394 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
4395 backtrack_infos = self._dynamic_config._backtrack_infos
4396 backtrack_infos.setdefault("config", {})
4397 backtrack_infos["config"].setdefault("needed_license_changes", set())
4398 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
# Effective USE for pkg, taking pending autounmask USE-config changes
# into account. When target_use is given, computes (and records in
# _needed_use_config_changes plus _backtrack_infos) the flag changes
# needed to satisfy it, rejecting changes that would violate a
# previously-satisfied REQUIRED_USE or touch use.mask/use.force; may
# set _need_restart when a recorded change affects the deps of a
# package already in the graph. NOTE(review): several interior lines
# (early returns, set construction for new_use) are elided here.
4402 def _pkg_use_enabled(self, pkg, target_use=None):
4404 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
4405 If target_use is given, the need changes are computed to make the package useable.
4406 Example: target_use = { "foo": True, "bar": False }
4407 The flags target_use must be in the pkg's IUSE.
4410 return pkg.use.enabled
4411 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
4413 if target_use is None:
4414 if needed_use_config_change is None:
4415 return pkg.use.enabled
4417 return needed_use_config_change[0]
# Start from any previously recorded (use, changes) pair so repeated
# calls accumulate rather than overwrite.
4419 if needed_use_config_change is not None:
4420 old_use = needed_use_config_change[0]
4422 old_changes = needed_use_config_change[1]
4423 new_changes = old_changes.copy()
4425 old_use = pkg.use.enabled
4430 for flag, state in target_use.items():
4431 real_flag = pkg.iuse.get_real_flag(flag)
4432 if real_flag is None:
4433 # Triggered by use-dep defaults.
4436 if real_flag not in old_use:
4437 if new_changes.get(real_flag) == False:
4439 new_changes[real_flag] = True
4442 if real_flag in old_use:
4443 if new_changes.get(real_flag) == True:
4445 new_changes[real_flag] = False
4446 new_use.update(old_use.difference(target_use))
# Restart is only needed when the USE change actually alters the
# package's dep strings, or a parent's use-dep mentions a changed flag.
4448 def want_restart_for_use_change(pkg, new_use):
4449 if pkg not in self._dynamic_config.digraph.nodes:
4452 for key in Package._dep_keys + ("LICENSE",):
4453 dep = pkg._metadata[key]
4454 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4455 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4457 if old_val != new_val:
4460 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
4461 if not parent_atoms:
4464 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
4465 for ppkg, atom in parent_atoms:
4466 if not atom.use or \
4467 not any(x in atom.use.required for x in changes):
4474 if new_changes != old_changes:
4475 #Don't do the change if it violates REQUIRED_USE.
4476 required_use = pkg._metadata.get("REQUIRED_USE")
4477 if required_use and check_required_use(required_use, old_use,
4478 pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
4479 not check_required_use(required_use, new_use,
4480 pkg.iuse.is_valid_flag, eapi=pkg.eapi):
# Masked/forced flags cannot be changed by the user; reject such changes.
4483 if any(x in pkg.use.mask for x in new_changes) or \
4484 any(x in pkg.use.force for x in new_changes):
4487 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
4488 backtrack_infos = self._dynamic_config._backtrack_infos
4489 backtrack_infos.setdefault("config", {})
4490 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
4491 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
4492 if want_restart_for_use_change(pkg, new_use):
4493 self._dynamic_config._need_restart = True
# Core single-pass package selection for one atom on one root. Walks
# the filtered dbs in preference order over two passes (existing graph
# nodes first), applying runtime-mask, exclusion, visibility and USE
# checks; handles --newuse/--reinstall/binpkg_respect_use rebuild
# decisions, the rebuilt_binaries param, and old-style virtual
# filtering; finally returns (best_match, existing_graph_node).
# NOTE(review): many interior lines (continue/break statements, else
# branches, try bodies, some assignments) are elided in this excerpt.
4496 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
4497 root_config = self._frozen_config.roots[root]
4498 pkgsettings = self._frozen_config.pkgsettings[root]
4499 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
4500 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
4501 # List of acceptable packages, ordered by type preference.
4502 matched_packages = []
4503 matched_pkgs_ignore_use = []
4504 highest_version = None
4505 if not isinstance(atom, portage.dep.Atom):
4506 atom = portage.dep.Atom(atom)
4508 have_new_virt = atom_cp.startswith("virtual/") and \
4509 self._have_new_virt(root, atom_cp)
4510 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
4511 existing_node = None
# Cache option/param flags once, before the candidate loops.
4513 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
4514 usepkg = "--usepkg" in self._frozen_config.myopts
4515 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
4516 empty = "empty" in self._dynamic_config.myparams
4517 selective = "selective" in self._dynamic_config.myparams
4519 avoid_update = "--update" not in self._frozen_config.myopts
4520 dont_miss_updates = "--update" in self._frozen_config.myopts
4521 use_ebuild_visibility = self._frozen_config.myopts.get(
4522 '--use-ebuild-visibility', 'n') != 'n'
4523 reinstall_atoms = self._frozen_config.reinstall_atoms
4524 usepkg_exclude = self._frozen_config.usepkg_exclude
4525 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
4527 # Behavior of the "selective" parameter depends on
4528 # whether or not a package matches an argument atom.
4529 # If an installed package provides an old-style
4530 # virtual that is no longer provided by an available
4531 # package, the installed package may match an argument
4532 # atom even though none of the available packages do.
4533 # Therefore, "selective" logic does not consider
4534 # whether or not an installed package matches an
4535 # argument atom. It only considers whether or not
4536 # available packages match argument atoms, which is
4537 # represented by the found_available_arg flag.
4538 found_available_arg = False
4539 packages_with_invalid_use_config = []
4540 for find_existing_node in True, False:
4543 for db, pkg_type, built, installed, db_keys in dbs:
4546 if installed and not find_existing_node:
4547 want_reinstall = reinstall or empty or \
4548 (found_available_arg and not selective)
4549 if want_reinstall and matched_packages:
4552 # Ignore USE deps for the initial match since we want to
4553 # ensure that updates aren't missed solely due to the user's
4554 # USE configuration.
4555 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
4557 if pkg.cp != atom_cp and have_new_virt:
4558 # pull in a new-style virtual instead
4560 if pkg in self._dynamic_config._runtime_pkg_mask:
4561 # The package has been masked by the backtracking logic
4563 root_slot = (pkg.root, pkg.slot_atom)
4564 if pkg.built and root_slot in self._rebuild.rebuild_list:
4566 if (pkg.installed and
4567 root_slot in self._rebuild.reinstall_list):
4570 if not pkg.installed and \
4571 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
4572 modified_use=self._pkg_use_enabled(pkg)):
4575 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
4576 modified_use=self._pkg_use_enabled(pkg)):
4579 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
4580 modified_use=self._pkg_use_enabled(pkg))
4582 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
4583 (not pkg.installed or dont_miss_updates):
4584 # Check if a higher version was rejected due to user
4585 # USE configuration. The packages_with_invalid_use_config
4586 # list only contains unbuilt ebuilds since USE can't
4587 # be changed for built packages.
4588 higher_version_rejected = False
4589 repo_priority = pkg.repo_priority
4590 for rejected in packages_with_invalid_use_config:
4591 if rejected.cp != pkg.cp:
4594 higher_version_rejected = True
4596 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
4597 # If version is identical then compare
4598 # repo priority (see bug #350254).
4599 rej_repo_priority = rejected.repo_priority
4600 if rej_repo_priority is not None and \
4601 (repo_priority is None or
4602 rej_repo_priority > repo_priority):
4603 higher_version_rejected = True
4605 if higher_version_rejected:
4609 reinstall_for_flags = None
4611 if not pkg.installed or \
4612 (matched_packages and not avoid_update):
4613 # Only enforce visibility on installed packages
4614 # if there is at least one other visible package
4615 # available. By filtering installed masked packages
4616 # here, packages that have been masked since they
4617 # were installed can be automatically downgraded
4618 # to an unmasked version. NOTE: This code needs to
4619 # be consistent with masking behavior inside
4620 # _dep_check_composite_db, in order to prevent
4621 # incorrect choices in || deps like bug #351828.
4623 if not self._pkg_visibility_check(pkg, autounmask_level):
4626 # Enable upgrade or downgrade to a version
4627 # with visible KEYWORDS when the installed
4628 # version is masked by KEYWORDS, but never
4629 # reinstall the same exact version only due
4630 # to a KEYWORDS mask. See bug #252167.
4632 if pkg.type_name != "ebuild" and matched_packages:
4633 # Don't re-install a binary package that is
4634 # identical to the currently installed package
4635 # (see bug #354441).
4636 identical_binary = False
4637 if usepkg and pkg.installed:
4638 for selected_pkg in matched_packages:
4639 if selected_pkg.type_name == "binary" and \
4640 selected_pkg.cpv == pkg.cpv and \
4641 selected_pkg.build_time == \
4643 identical_binary = True
4646 if not identical_binary:
4647 # If the ebuild no longer exists or it's
4648 # keywords have been dropped, reject built
4649 # instances (installed or binary).
4650 # If --usepkgonly is enabled, assume that
4651 # the ebuild status should be ignored.
4652 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
4653 if pkg.installed and pkg.masks:
4655 elif not self._equiv_ebuild_visible(pkg,
4656 autounmask_level=autounmask_level):
4659 # Calculation of USE for unbuilt ebuilds is relatively
4660 # expensive, so it is only performed lazily, after the
4661 # above visibility checks are complete.
4665 for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
4666 if myarg.force_reinstall:
4669 except InvalidDependString:
4671 # masked by corruption
4673 if not installed and myarg:
4674 found_available_arg = True
4676 if atom.unevaluated_atom.use:
4677 #Make sure we don't miss a 'missing IUSE'.
4678 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
4679 # Don't add this to packages_with_invalid_use_config
4680 # since IUSE cannot be adjusted by the user.
4685 matched_pkgs_ignore_use.append(pkg)
# Autounmask may adjust USE on unbuilt ebuilds to satisfy the
# atom's use deps; built packages keep their recorded USE.
4686 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
4688 for flag in atom.use.enabled:
4689 target_use[flag] = True
4690 for flag in atom.use.disabled:
4691 target_use[flag] = False
4692 use = self._pkg_use_enabled(pkg, target_use)
4694 use = self._pkg_use_enabled(pkg)
4697 can_adjust_use = not pkg.built
4698 is_valid_flag = pkg.iuse.is_valid_flag
4699 missing_enabled = frozenset(x for x in
4700 atom.use.missing_enabled if not is_valid_flag(x))
4701 missing_disabled = frozenset(x for x in
4702 atom.use.missing_disabled if not is_valid_flag(x))
4704 if atom.use.enabled:
4705 if any(x in atom.use.enabled for x in missing_disabled):
4707 can_adjust_use = False
4708 need_enabled = atom.use.enabled.difference(use)
4710 need_enabled = need_enabled.difference(missing_enabled)
4714 if any(x in pkg.use.mask for x in need_enabled):
4715 can_adjust_use = False
4717 if atom.use.disabled:
4718 if any(x in atom.use.disabled for x in missing_enabled):
4720 can_adjust_use = False
4721 need_disabled = atom.use.disabled.intersection(use)
4723 need_disabled = need_disabled.difference(missing_disabled)
4727 if any(x in pkg.use.force and x not in
4728 pkg.use.mask for x in need_disabled):
4729 can_adjust_use = False
4733 # Above we must ensure that this package has
4734 # absolutely no use.force, use.mask, or IUSE
4735 # issues that the user typically can't make
4736 # adjustments to solve (see bug #345979).
4737 # FIXME: Conditional USE deps complicate
4738 # issues. This code currently excludes cases
4739 # in which the user can adjust the parent
4740 # package's USE in order to satisfy the dep.
4741 packages_with_invalid_use_config.append(pkg)
4744 if pkg.cp == atom_cp:
4745 if highest_version is None:
4746 highest_version = pkg
4747 elif pkg > highest_version:
4748 highest_version = pkg
4749 # At this point, we've found the highest visible
4750 # match from the current repo. Any lower versions
4751 # from this repo are ignored, so this so the loop
4752 # will always end with a break statement below
4754 if find_existing_node:
4755 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4759 # Use PackageSet.findAtomForPackage()
4760 # for PROVIDE support.
4761 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
4762 if highest_version and \
4763 e_pkg.cp == atom_cp and \
4764 e_pkg < highest_version and \
4765 e_pkg.slot_atom != highest_version.slot_atom:
4766 # There is a higher version available in a
4767 # different slot, so this existing node is
4771 matched_packages.append(e_pkg)
4772 existing_node = e_pkg
4774 # Compare built package to current config and
4775 # reject the built package if necessary.
4776 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4777 ("--newuse" in self._frozen_config.myopts or \
4778 "--reinstall" in self._frozen_config.myopts or \
4779 (not installed and self._dynamic_config.myparams.get(
4780 "binpkg_respect_use") in ("y", "auto"))):
4781 iuses = pkg.iuse.all
4782 old_use = self._pkg_use_enabled(pkg)
4784 pkgsettings.setcpv(myeb)
4786 pkgsettings.setcpv(pkg)
4787 now_use = pkgsettings["PORTAGE_USE"].split()
4788 forced_flags = set()
4789 forced_flags.update(pkgsettings.useforce)
4790 forced_flags.update(pkgsettings.usemask)
4792 if myeb and not usepkgonly and not useoldpkg:
4793 cur_iuse = myeb.iuse.all
4794 reinstall_for_flags = self._reinstall_for_flags(pkg,
4795 forced_flags, old_use, iuses, now_use, cur_iuse)
4796 if reinstall_for_flags:
4797 if not pkg.installed:
4798 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4800 # Compare current config to installed package
4801 # and do not reinstall if possible.
4802 if not installed and not useoldpkg and \
4803 ("--newuse" in self._frozen_config.myopts or \
4804 "--reinstall" in self._frozen_config.myopts) and \
4805 cpv in vardb.match(atom):
4806 forced_flags = set()
4807 forced_flags.update(pkg.use.force)
4808 forced_flags.update(pkg.use.mask)
4809 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4810 old_use = inst_pkg.use.enabled
4811 old_iuse = inst_pkg.iuse.all
4812 cur_use = self._pkg_use_enabled(pkg)
4813 cur_iuse = pkg.iuse.all
4814 reinstall_for_flags = \
4815 self._reinstall_for_flags(pkg,
4816 forced_flags, old_use, old_iuse,
4818 if reinstall_for_flags:
4820 if reinstall_atoms.findAtomForPackage(pkg, \
4821 modified_use=self._pkg_use_enabled(pkg)):
4826 matched_oldpkg.append(pkg)
4827 matched_packages.append(pkg)
4828 if reinstall_for_flags:
4829 self._dynamic_config._reinstall_nodes[pkg] = \
# --- candidate collection done; tie-breaking among matches below ---
4833 if not matched_packages:
4836 if "--debug" in self._frozen_config.myopts:
4837 for pkg in matched_packages:
4838 portage.writemsg("%s %s%s%s\n" % \
4839 ((pkg.type_name + ":").rjust(10),
4840 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4842 # Filter out any old-style virtual matches if they are
4843 # mixed with new-style virtual matches.
4845 if len(matched_packages) > 1 and \
4846 "virtual" == portage.catsplit(cp)[0]:
4847 for pkg in matched_packages:
4850 # Got a new-style virtual, so filter
4851 # out any old-style virtuals.
4852 matched_packages = [pkg for pkg in matched_packages \
4856 if existing_node is not None and \
4857 existing_node in matched_packages:
4858 return existing_node, existing_node
4860 if len(matched_packages) > 1:
4861 if rebuilt_binaries:
4865 for pkg in matched_packages:
4871 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4873 if built_pkg is not None and inst_pkg is not None:
4874 # Only reinstall if binary package BUILD_TIME is
4875 # non-empty, in order to avoid cases like to
4876 # bug #306659 where BUILD_TIME fields are missing
4877 # in local and/or remote Packages file.
4878 built_timestamp = built_pkg.build_time
4879 installed_timestamp = inst_pkg.build_time
4881 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4883 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4884 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4885 if built_timestamp and \
4886 built_timestamp > installed_timestamp and \
4887 built_timestamp >= minimal_timestamp:
4888 return built_pkg, existing_node
4890 #Don't care if the binary has an older BUILD_TIME than the installed
4891 #package. This is for closely tracking a binhost.
4892 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4894 if built_timestamp and \
4895 built_timestamp != installed_timestamp:
4896 return built_pkg, existing_node
# Drop invalid installed instances when alternatives exist.
4898 for pkg in matched_packages:
4899 if pkg.installed and pkg.invalid:
4900 matched_packages = [x for x in \
4901 matched_packages if x is not pkg]
4904 for pkg in matched_packages:
4905 if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
4906 return pkg, existing_node
4908 visible_matches = []
4910 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4911 if self._pkg_visibility_check(pkg, autounmask_level)]
4912 if not visible_matches:
4913 visible_matches = [pkg.cpv for pkg in matched_packages \
4914 if self._pkg_visibility_check(pkg, autounmask_level)]
4916 bestmatch = portage.best(visible_matches)
4918 # all are masked, so ignore visibility
4919 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4920 matched_packages = [pkg for pkg in matched_packages \
4921 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4923 # ordered by type preference ("ebuild" type is the last resort)
4924 return matched_packages[-1], existing_node
# Prefer packages already added to the depgraph for this atom; returns
# (pkg, in_graph_slot_node) where pkg is the highest match from the
# graph trees. NOTE(review): the lines between the match lookup and
# the return (e.g. the no-match branch) are elided in this excerpt.
4926 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4928 Select packages that have already been added to the graph or
4929 those that are installed and have not been scheduled for
4932 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4933 matches = graph_db.match_pkgs(atom)
4936 pkg = matches[-1] # highest match
4937 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4938 return pkg, in_graph
# Select among installed packages only. When several versions match,
# progressively narrow: prefer instances passing the visibility check,
# then instances without recorded masks, then instances whose
# equivalent ebuild is still visible (bug #445506); the highest
# remaining match wins. NOTE(review): the narrowing assignments
# between the visible filters are elided in this excerpt.
4940 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4942 Select packages that are installed.
4944 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
4948 if len(matches) > 1:
4949 matches.reverse() # ascending order
4950 unmasked = [pkg for pkg in matches if \
4951 self._pkg_visibility_check(pkg)]
4953 if len(unmasked) == 1:
4956 # Account for packages with masks (like KEYWORDS masks)
4957 # that are usually ignored in visibility checks for
4958 # installed packages, in order to handle cases like
4960 unmasked = [pkg for pkg in matches if not pkg.masks]
4963 if len(matches) > 1:
4964 # Now account for packages for which existing
4965 # ebuilds are masked or unavailable (bug #445506).
4966 unmasked = [pkg for pkg in matches if
4967 self._equiv_ebuild_visible(pkg)]
4971 pkg = matches[-1] # highest match
4972 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4973 return pkg, in_graph
# NOTE(review): partial extract — original lines are missing between the
# numbered lines below (several `continue`/`return`/`else:` lines and the
# `use_change` assignments are not visible here).
4975 def _complete_graph(self, required_sets=None):
4977 Add any deep dependencies of required sets (args, system, world) that
4978 have not been pulled into the graph yet. This ensures that the graph
4979 is consistent such that initially satisfied deep dependencies are not
4980 broken in the new graph. Initially unsatisfied dependencies are
4981 irrelevant since we only want to avoid breaking dependencies that are
4982 initially satisfied.
4984 Since this method can consume enough time to disturb users, it is
4985 currently only enabled by the --complete-graph option.
4987 @param required_sets: contains required sets (currently only used
4988 for depclean and prune removal operations)
4989 @type required_sets: dict
# Fast exits: nothing to complete for --buildpkgonly or non-recursive runs.
4991 if "--buildpkgonly" in self._frozen_config.myopts or \
4992 "recurse" not in self._dynamic_config.myparams:
# The "complete_if_new_*" params default to enabled ("y").
4995 complete_if_new_use = self._dynamic_config.myparams.get(
4996 "complete_if_new_use", "y") == "y"
4997 complete_if_new_ver = self._dynamic_config.myparams.get(
4998 "complete_if_new_ver", "y") == "y"
4999 rebuild_if_new_slot = self._dynamic_config.myparams.get(
5000 "rebuild_if_new_slot", "y") == "y"
5001 complete_if_new_slot = rebuild_if_new_slot
5003 if "complete" not in self._dynamic_config.myparams and \
5004 (complete_if_new_use or
5005 complete_if_new_ver or complete_if_new_slot):
5006 # Enable complete mode if an installed package will change somehow.
5008 version_change = False
5009 for node in self._dynamic_config.digraph:
# Only packages being merged can change an installed counterpart.
5010 if not isinstance(node, Package) or \
5011 node.operation != "merge":
5013 vardb = self._frozen_config.roots[
5014 node.root].trees["vartree"].dbapi
5016 if complete_if_new_use or complete_if_new_ver:
5017 inst_pkg = vardb.match_pkgs(node.slot_atom)
5018 if inst_pkg and inst_pkg[0].cp == node.cp:
5019 inst_pkg = inst_pkg[0]
5020 if complete_if_new_ver:
# Either direction of version inequality counts as a change.
5021 if inst_pkg < node or node < inst_pkg:
5022 version_change = True
5024 elif not (inst_pkg.slot == node.slot and
5025 inst_pkg.sub_slot == node.sub_slot):
5026 # slot/sub-slot change without revbump gets
5027 # similar treatment to a version change
5028 version_change = True
5031 # Intersect enabled USE with IUSE, in order to
5032 # ignore forced USE from implicit IUSE flags, since
5033 # they're probably irrelevant and they are sensitive
5034 # to use.mask/force changes in the profile.
5035 if complete_if_new_use and \
5036 (node.iuse.all != inst_pkg.iuse.all or
5037 self._pkg_use_enabled(node).intersection(node.iuse.all) !=
5038 self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
5042 if complete_if_new_slot:
5043 cp_list = vardb.match_pkgs(Atom(node.cp))
# New slot: same cp installed but no installed slot/sub-slot matches.
5044 if (cp_list and cp_list[0].cp == node.cp and
5045 not any(node.slot == pkg.slot and
5046 node.sub_slot == pkg.sub_slot for pkg in cp_list)):
5047 version_change = True
# `use_change` is set in lines missing from this extract — TODO confirm.
5050 if use_change or version_change:
5051 self._dynamic_config.myparams["complete"] = True
5053 if "complete" not in self._dynamic_config.myparams:
5058 # Put the depgraph into a mode that causes it to only
5059 # select packages that have already been added to the
5060 # graph or those that are installed and have not been
5061 # scheduled for replacement. Also, toggle the "deep"
5062 # parameter so that all dependencies are traversed and
5064 self._dynamic_config._complete_mode = True
5065 self._select_atoms = self._select_atoms_from_graph
5066 if "remove" in self._dynamic_config.myparams:
5067 self._select_package = self._select_pkg_from_installed
5069 self._select_package = self._select_pkg_from_graph
5070 self._dynamic_config._traverse_ignored_deps = True
5071 already_deep = self._dynamic_config.myparams.get("deep") is True
5072 if not already_deep:
5073 self._dynamic_config.myparams["deep"] = True
5075 # Invalidate the package selection cache, since
5076 # _select_package has just changed implementations.
5077 for trees in self._dynamic_config._filtered_trees.values():
5078 trees["porttree"].dbapi._clear_cache()
# Re-seed the dependency stack from the required sets for each root.
5080 args = self._dynamic_config._initial_arg_list[:]
5081 for root in self._frozen_config.roots:
5082 if root != self._frozen_config.target_root and \
5083 ("remove" in self._dynamic_config.myparams or
5084 self._frozen_config.myopts.get("--root-deps") is not None):
5085 # Only pull in deps for the relevant root.
5087 depgraph_sets = self._dynamic_config.sets[root]
5088 required_set_names = self._frozen_config._required_set_names.copy()
5089 remaining_args = required_set_names.copy()
5090 if required_sets is None or root not in required_sets:
5093 # Removal actions may override sets with temporary
5094 # replacements that have had atoms removed in order
5095 # to implement --deselect behavior.
5096 required_set_names = set(required_sets[root])
5097 depgraph_sets.sets.clear()
5098 depgraph_sets.sets.update(required_sets[root])
5099 if "remove" not in self._dynamic_config.myparams and \
5100 root == self._frozen_config.target_root and \
5102 remaining_args.difference_update(depgraph_sets.sets)
5103 if not remaining_args and \
5104 not self._dynamic_config._ignored_deps and \
5105 not self._dynamic_config._dep_stack:
5107 root_config = self._frozen_config.roots[root]
5108 for s in required_set_names:
5109 pset = depgraph_sets.sets.get(s)
5111 pset = root_config.sets[s]
5112 atom = SETPREFIX + s
5113 args.append(SetArg(arg=atom, pset=pset,
5114 root_config=root_config))
5116 self._set_args(args)
# Push every atom of every (expanded) set arg onto the dep stack.
5117 for arg in self._expand_set_args(args, add_to_digraph=True):
5118 for atom in arg.pset.getAtoms():
5119 self._dynamic_config._dep_stack.append(
5120 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps are now traversed too (complete mode).
5124 if self._dynamic_config._ignored_deps:
5125 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
5126 self._dynamic_config._ignored_deps = []
5127 if not self._create_graph(allow_unsatisfied=True):
5129 # Check the unsatisfied deps to see if any initially satisfied deps
5130 # will become unsatisfied due to an upgrade. Initially unsatisfied
5131 # deps are irrelevant since we only want to avoid breaking deps
5132 # that are initially satisfied.
5133 while self._dynamic_config._unsatisfied_deps:
5134 dep = self._dynamic_config._unsatisfied_deps.pop()
5135 vardb = self._frozen_config.roots[
5136 dep.root].trees["vartree"].dbapi
5137 matches = vardb.match_pkgs(dep.atom)
5139 self._dynamic_config._initially_unsatisfied_deps.append(dep)
5141 # A scheduled installation broke a deep dependency.
5142 # Add the installed package to the graph so that it
5143 # will be appropriately reported as a slot collision
5144 # (possibly solvable via backtracking).
5145 pkg = matches[-1] # highest match
5146 if not self._add_pkg(pkg, dep):
5148 if not self._create_graph(allow_unsatisfied=True):
# Cached Package factory keyed on (cpv, type_name, repo, root, installed,
# onlydeps). NOTE(review): partial extract — original lines are missing
# between the numbered lines below (e.g. the `if pkg is None:` guard before
# the db lookup and the final `return pkg`).
5152 def _pkg(self, cpv, type_name, root_config, installed=False,
5153 onlydeps=False, myrepo = None):
5155 Get a package instance from the cache, or create a new
5156 one if necessary. Raises PackageNotFound from aux_get if it
5157 failures for some reason (package does not exist or is
5161 # Ensure that we use the specially optimized RootConfig instance
5162 # that refers to FakeVartree instead of the real vartree.
5163 root_config = self._frozen_config.roots[root_config.root]
5164 pkg = self._frozen_config._pkg_cache.get(
5165 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5166 repo_name=myrepo, root_config=root_config,
5167 installed=installed, onlydeps=onlydeps))
5168 if pkg is None and onlydeps and not installed:
5169 # Maybe it already got pulled in as a "merge" node.
5170 pkg = self._dynamic_config.mydbapi[root_config.root].get(
5171 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5172 repo_name=myrepo, root_config=root_config,
5173 installed=installed, onlydeps=False))
# Cache miss: build the Package from the appropriate tree's metadata.
5176 tree_type = self.pkg_tree_map[type_name]
5177 db = root_config.trees[tree_type].dbapi
5178 db_keys = list(self._frozen_config._trees_orig[root_config.root][
5179 tree_type].dbapi._aux_cache_keys)
# aux_get failure (lines missing here) is translated to PackageNotFound.
5182 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
5184 raise portage.exception.PackageNotFound(cpv)
5186 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
5187 installed=installed, metadata=metadata, onlydeps=onlydeps,
5188 root_config=root_config, type_name=type_name)
5190 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked only by LICENSE, so
# the user can later be advised to adjust ACCEPT_LICENSE.
5192 if not self._pkg_visibility_check(pkg) and \
5193 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
5194 slot_key = (pkg.root, pkg.slot_atom)
5195 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
5196 if other_pkg is None or pkg > other_pkg:
5197 self._frozen_config._highest_license_masked[slot_key] = pkg
# NOTE(review): partial extract — original lines are missing between the
# numbered lines below (various `continue`/`return True`/`else:` lines,
# try/except scaffolding, and some loop headers are not visible here).
5201 def _validate_blockers(self):
5202 """Remove any blockers from the digraph that do not match any of the
5203 packages within the graph. If necessary, create hard deps to ensure
5204 correct merge order such that mutually blocking packages are never
5205 installed simultaneously. Also add runtime blockers from all installed
5206 packages if any of them haven't been added already (bug 128809)."""
5208 if "--buildpkgonly" in self._frozen_config.myopts or \
5209 "--nodeps" in self._frozen_config.myopts:
5213 # Pull in blockers from all installed packages that haven't already
5214 # been pulled into the depgraph, in order to ensure that they are
5215 # respected (bug 128809). Due to the performance penalty that is
5216 # incurred by all the additional dep_check calls that are required,
5217 # blockers returned from dep_check are cached on disk by the
5218 # BlockerCache class.
5220 # For installed packages, always ignore blockers from DEPEND since
5221 # only runtime dependencies should be relevant for packages that
5222 # are already built.
5223 dep_keys = Package._runtime_keys
5224 for myroot in self._frozen_config.trees:
5226 if self._frozen_config.myopts.get("--root-deps") is not None and \
5227 myroot != self._frozen_config.target_root:
5230 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
5231 pkgsettings = self._frozen_config.pkgsettings[myroot]
5232 root_config = self._frozen_config.roots[myroot]
5233 final_db = self._dynamic_config.mydbapi[myroot]
# stale_cache tracks cache entries not seen this pass; they are purged
# below (line 5360-5361).
5235 blocker_cache = BlockerCache(myroot, vardb)
5236 stale_cache = set(blocker_cache)
5239 stale_cache.discard(cpv)
5240 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
5242 pkg in self._dynamic_config._traversed_pkg_deps
5244 # Check for masked installed packages. Only warn about
5245 # packages that are in the graph in order to avoid warning
5246 # about those that will be automatically uninstalled during
5247 # the merge process or by --depclean. Always warn about
5248 # packages masked by license, since the user likely wants
5249 # to adjust ACCEPT_LICENSE.
5251 if not self._pkg_visibility_check(pkg,
5252 trust_graph=False) and \
5253 (pkg_in_graph or 'LICENSE' in pkg.masks):
5254 self._dynamic_config._masked_installed.add(pkg)
5256 self._check_masks(pkg)
5258 blocker_atoms = None
# Graph-derived blockers for this package, when available.
5264 self._dynamic_config._blocker_parents.child_nodes(pkg))
5269 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
5273 # Select just the runtime blockers.
5274 blockers = [blocker for blocker in blockers \
5275 if blocker.priority.runtime or \
5276 blocker.priority.runtime_post]
5277 if blockers is not None:
5278 blockers = set(blocker.atom for blocker in blockers)
5280 # If this node has any blockers, create a "nomerge"
5281 # node for it so that they can be enforced.
5282 self._spinner_update()
5283 blocker_data = blocker_cache.get(cpv)
# Counter mismatch means the cached entry is for a different install
# of the same cpv, so it must not be trusted.
5284 if blocker_data is not None and \
5285 blocker_data.counter != pkg.counter:
5288 # If blocker data from the graph is available, use
5289 # it to validate the cache and update the cache if
5291 if blocker_data is not None and \
5292 blockers is not None:
5293 if not blockers.symmetric_difference(
5294 blocker_data.atoms):
5298 if blocker_data is None and \
5299 blockers is not None:
5300 # Re-use the blockers from the graph.
5301 blocker_atoms = sorted(blockers)
5303 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
5304 blocker_cache[pkg.cpv] = blocker_data
5308 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
5310 # Use aux_get() to trigger FakeVartree global
5311 # updates on *DEPEND when appropriate.
5312 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5313 # It is crucial to pass in final_db here in order to
5314 # optimize dep_check calls by eliminating atoms via
5315 # dep_wordreduce and dep_eval calls.
5317 success, atoms = portage.dep_check(depstr,
5318 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
5319 trees=self._dynamic_config._graph_trees, myroot=myroot)
5322 except Exception as e:
5323 # This is helpful, for example, if a ValueError
5324 # is thrown from cpv_expand due to multiple
5325 # matches (this can happen if an atom lacks a
5327 show_invalid_depstring_notice(
5328 pkg, depstr, "%s" % (e,))
# dep_check failure path: tolerate it if a replacement merge is
# already scheduled for this slot.
5332 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5333 if replacement_pkg and \
5334 replacement_pkg[0].operation == "merge":
5335 # This package is being replaced anyway, so
5336 # ignore invalid dependencies so as not to
5337 # annoy the user too much (otherwise they'd be
5338 # forced to manually unmerge it first).
5340 show_invalid_depstring_notice(pkg, depstr, atoms)
5342 blocker_atoms = [myatom for myatom in atoms \
5344 blocker_atoms.sort()
5345 blocker_cache[cpv] = \
5346 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
# Register each blocker atom as a runtime-priority Blocker node.
5349 for atom in blocker_atoms:
5350 blocker = Blocker(atom=atom,
5352 priority=self._priority(runtime=True),
5354 self._dynamic_config._blocker_parents.add(blocker, pkg)
5355 except portage.exception.InvalidAtom as e:
5356 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5357 show_invalid_depstring_notice(
5358 pkg, depstr, "Invalid Atom: %s" % (e,))
5360 for cpv in stale_cache:
5361 del blocker_cache[cpv]
5362 blocker_cache.flush()
5365 # Discard any "uninstall" tasks scheduled by previous calls
5366 # to this method, since those tasks may not make sense given
5367 # the current graph state.
5368 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
5369 if previous_uninstall_tasks:
5370 self._dynamic_config._blocker_uninstalls = digraph()
5371 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
5373 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
5374 self._spinner_update()
5375 root_config = self._frozen_config.roots[blocker.root]
5376 virtuals = root_config.settings.getvirtuals()
5377 myroot = blocker.root
5378 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
5379 final_db = self._dynamic_config.mydbapi[myroot]
5381 provider_virtual = False
5382 if blocker.cp in virtuals and \
5383 not self._have_new_virt(blocker.root, blocker.cp):
5384 provider_virtual = True
5386 # Use this to check PROVIDE for each matched package
5388 atom_set = InternalPackageSet(
5389 initial_atoms=[blocker.atom])
5391 if provider_virtual:
# Expand an old-style virtual blocker into one atom per provider.
5393 for provider_entry in virtuals[blocker.cp]:
5394 atoms.append(Atom(blocker.atom.replace(
5395 blocker.cp, provider_entry.cp, 1)))
5397 atoms = [blocker.atom]
# Packages blocked in the initial (installed) vs final (post-merge) DBs.
5399 blocked_initial = set()
5401 for pkg in initial_db.match_pkgs(atom):
5402 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5403 blocked_initial.add(pkg)
5405 blocked_final = set()
5407 for pkg in final_db.match_pkgs(atom):
5408 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5409 blocked_final.add(pkg)
5411 if not blocked_initial and not blocked_final:
5412 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
5413 self._dynamic_config._blocker_parents.remove(blocker)
5414 # Discard any parents that don't have any more blockers.
5415 for pkg in parent_pkgs:
5416 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
5417 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
5418 self._dynamic_config._blocker_parents.remove(pkg)
5420 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
5421 unresolved_blocks = False
5422 depends_on_order = set()
5423 for pkg in blocked_initial:
5424 if pkg.slot_atom == parent.slot_atom and \
5425 not blocker.atom.blocker.overlap.forbid:
5426 # New !!atom blockers do not allow temporary
5427 # simulaneous installation, so unlike !atom
5428 # blockers, !!atom blockers aren't ignored
5429 # when they match other packages occupying
5432 if parent.installed:
5433 # Two currently installed packages conflict with
5434 # eachother. Ignore this case since the damage
5435 # is already done and this would be likely to
5436 # confuse users if displayed like a normal blocker.
5439 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5441 if parent.operation == "merge":
5442 # Maybe the blocked package can be replaced or simply
5443 # unmerged to resolve this block.
5444 depends_on_order.add((pkg, parent))
5446 # None of the above blocker resolutions techniques apply,
5447 # so apparently this one is unresolvable.
5448 unresolved_blocks = True
5449 for pkg in blocked_final:
5450 if pkg.slot_atom == parent.slot_atom and \
5451 not blocker.atom.blocker.overlap.forbid:
5452 # New !!atom blockers do not allow temporary
5453 # simulaneous installation, so unlike !atom
5454 # blockers, !!atom blockers aren't ignored
5455 # when they match other packages occupying
5458 if parent.operation == "nomerge" and \
5459 pkg.operation == "nomerge":
5460 # This blocker will be handled the next time that a
5461 # merge of either package is triggered.
5464 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5466 # Maybe the blocking package can be
5467 # unmerged to resolve this block.
5468 if parent.operation == "merge" and pkg.installed:
5469 depends_on_order.add((pkg, parent))
5471 elif parent.operation == "nomerge":
5472 depends_on_order.add((parent, pkg))
5474 # None of the above blocker resolutions techniques apply,
5475 # so apparently this one is unresolvable.
5476 unresolved_blocks = True
5478 # Make sure we don't unmerge any package that have been pulled
5480 if not unresolved_blocks and depends_on_order:
5481 for inst_pkg, inst_task in depends_on_order:
5482 if self._dynamic_config.digraph.contains(inst_pkg) and \
5483 self._dynamic_config.digraph.parent_nodes(inst_pkg):
5484 unresolved_blocks = True
5487 if not unresolved_blocks and depends_on_order:
5488 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit "uninstall" task.
5489 uninst_task = Package(built=inst_pkg.built,
5490 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5491 metadata=inst_pkg._metadata,
5492 operation="uninstall",
5493 root_config=inst_pkg.root_config,
5494 type_name=inst_pkg.type_name)
5495 # Enforce correct merge order with a hard dep.
5496 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
5497 priority=BlockerDepPriority.instance)
5498 # Count references to this blocker so that it can be
5499 # invalidated after nodes referencing it have been
5501 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
5502 if not unresolved_blocks and not depends_on_order:
5503 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
5504 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
5505 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
5506 self._dynamic_config._blocker_parents.remove(blocker)
5507 if not self._dynamic_config._blocker_parents.child_nodes(parent):
5508 self._dynamic_config._blocker_parents.remove(parent)
5509 if unresolved_blocks:
5510 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
# Report whether blocker conflicts are tolerable for this run: true when
# any option that skips actual merging to the live root is in effect.
# NOTE(review): partial extract — the accumulator initialization and the
# return statement are in lines missing from this extract.
5514 def _accept_blocker_conflicts(self):
5516 for x in ("--buildpkgonly", "--fetchonly",
5517 "--fetch-all-uri", "--nodeps"):
5518 if x in self._frozen_config.myopts:
# NOTE(review): partial extract — the comparator's return statements for
# the uninstall and system-dep branches are in lines missing here.
5523 def _merge_order_bias(self, mygraph):
5525 For optimal leaf node selection, promote deep system runtime deps and
5526 order nodes from highest to lowest overall reference count.
# Reference count = number of parents in the graph, computed once up front.
5530 for node in mygraph.order:
5531 node_info[node] = len(mygraph.parent_nodes(node))
5532 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
5534 def cmp_merge_preference(node1, node2):
# Uninstall operations get special ordering relative to each other.
5536 if node1.operation == 'uninstall':
5537 if node2.operation == 'uninstall':
5541 if node2.operation == 'uninstall':
5542 if node1.operation == 'uninstall':
# Deep system runtime deps are promoted ahead of other nodes.
5546 node1_sys = node1 in deep_system_deps
5547 node2_sys = node2 in deep_system_deps
5548 if node1_sys != node2_sys:
# Fall back to descending reference count.
5553 return node_info[node2] - node_info[node1]
5555 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return (a copy of) the serialized merge list, computing and caching it on
# first use. NOTE(review): partial extract — the loop's `try:` line, the
# retry `continue`, and the final return/reversal handling are in lines
# missing from this extract.
5557 def altlist(self, reversed=False):
# Loop until serialization succeeds; _serialize_tasks_retry restarts it.
5559 while self._dynamic_config._serialized_tasks_cache is None:
5560 self._resolve_conflicts()
5562 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
5563 self._serialize_tasks()
5564 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list.
5567 retlist = self._dynamic_config._serialized_tasks_cache[:]
# NOTE(review): partial extract — the `libc_pkgs` initialization, a
# no-match `continue`, and the early-exit when no libc merges are pending
# are in lines missing from this extract.
5572 def _implicit_libc_deps(self, mergelist, graph):
5574 Create implicit dependencies on libc, in order to ensure that libc
5575 is installed as early as possible (see bug #303567).
# Only the running root is considered an implicit libc root here.
5578 implicit_libc_roots = (self._frozen_config._running_root.root,)
5579 for root in implicit_libc_roots:
5580 graphdb = self._dynamic_config.mydbapi[root]
5581 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5582 for atom in self._expand_virt_from_graph(root,
5583 portage.const.LIBC_PACKAGE_ATOM):
5586 match = graphdb.match_pkgs(atom)
# Collect libc packages that are new merges (not yet installed).
5590 if pkg.operation == "merge" and \
5591 not vardb.cpv_exists(pkg.cpv):
5592 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
5597 earlier_libc_pkgs = set()
5599 for pkg in mergelist:
5600 if not isinstance(pkg, Package):
5601 # a satisfied blocker
5603 root_libc_pkgs = libc_pkgs.get(pkg.root)
5604 if root_libc_pkgs is not None and \
5605 pkg.operation == "merge":
5606 if pkg in root_libc_pkgs:
5607 earlier_libc_pkgs.add(pkg)
# Any merge ordered after a libc merge gets a hard buildtime dep on
# that libc package, so libc lands first.
5609 for libc_pkg in root_libc_pkgs:
5610 if libc_pkg in earlier_libc_pkgs:
5611 graph.add(libc_pkg, pkg,
5612 priority=DepPriority(buildtime=True))
# NOTE(review): partial extract — original lines are missing between the
# numbered lines below (e.g. the root loop around line 5653 and the final
# return of the _scheduler_graph_config object).
5614 def schedulerGraph(self):
5616 The scheduler graph is identical to the normal one except that
5617 uninstall edges are reversed in specific cases that require
5618 conflicting packages to be temporarily installed simultaneously.
5619 This is intended for use by the Scheduler in it's parallelization
5620 logic. It ensures that temporary simultaneous installation of
5621 conflicting packages is avoided when appropriate (especially for
5622 !!atom blockers), but allowed in specific cases that require it.
5624 Note that this method calls break_refs() which alters the state of
5625 internal Package instances such that this depgraph instance should
5626 not be used to perform any more calculations.
5629 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
5630 mergelist = self.altlist()
5631 self._implicit_libc_deps(mergelist,
5632 self._dynamic_config._scheduler_graph)
5634 # Break DepPriority.satisfied attributes which reference
5635 # installed Package instances.
5636 for parents, children, node in \
5637 self._dynamic_config._scheduler_graph.nodes.values():
5638 for priorities in chain(parents.values(), children.values()):
5639 for priority in priorities:
# Any truthy `satisfied` (which may hold a Package reference, per
# the comment above) is collapsed to the bare boolean True.
5640 if priority.satisfied:
5641 priority.satisfied = True
# Prune the package cache down to packages still reachable via the graph
# or the installed-package databases, to release memory.
5643 pkg_cache = self._frozen_config._pkg_cache
5644 graph = self._dynamic_config._scheduler_graph
5645 trees = self._frozen_config.trees
5646 pruned_pkg_cache = {}
5647 for key, pkg in pkg_cache.items():
5648 if pkg in graph or \
5649 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
5650 pruned_pkg_cache[key] = pkg
5653 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
5657 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
5661 def break_refs(self):
5663 Break any references in Package instances that lead back to the depgraph.
5664 This is useful if you want to hold references to packages without also
5665 holding the depgraph on the heap. It should only be called after the
5666 depgraph and _frozen_config will not be used for any more calculations.
# Swap each root_config back to the original (non-FakeVartree) instance
# so Package objects no longer pin this depgraph's structures.
5668 for root_config in self._frozen_config.roots.values():
5669 root_config.update(self._frozen_config._trees_orig[
5670 root_config.root]["root_config"])
5671 # Both instances are now identical, so discard the
5672 # original which should have no other references.
5673 self._frozen_config._trees_orig[
5674 root_config.root]["root_config"] = root_config
# Resolve slot and blocker conflicts before task serialization.
# NOTE(review): partial extract — some guard lines may be missing between
# the numbered lines below.
5676 def _resolve_conflicts(self):
# Slot collisions with backtracking available force "complete" mode so
# the full graph is validated before resolving.
5678 if "complete" not in self._dynamic_config.myparams and \
5679 self._dynamic_config._allow_backtracking and \
5680 self._dynamic_config._slot_collision_nodes and \
5681 not self._accept_blocker_conflicts():
5682 self._dynamic_config.myparams["complete"] = True
5684 if not self._complete_graph():
5685 raise self._unknown_internal_error()
5687 self._process_slot_conflicts()
5689 self._slot_operator_trigger_reinstalls()
# Blocker validation failure is unrecoverable here; skip restart and
# surface it as an internal error.
5691 if not self._validate_blockers():
5692 self._dynamic_config._skip_restart = True
5693 raise self._unknown_internal_error()
5695 def _serialize_tasks(self):
5697 debug = "--debug" in self._frozen_config.myopts
5700 writemsg("\ndigraph:\n\n", noiselevel=-1)
5701 self._dynamic_config.digraph.debug_print()
5702 writemsg("\n", noiselevel=-1)
5704 scheduler_graph = self._dynamic_config.digraph.copy()
5706 if '--nodeps' in self._frozen_config.myopts:
5707 # Preserve the package order given on the command line.
5708 return ([node for node in scheduler_graph \
5709 if isinstance(node, Package) \
5710 and node.operation == 'merge'], scheduler_graph)
5712 mygraph=self._dynamic_config.digraph.copy()
5714 removed_nodes = set()
5716 # Prune off all DependencyArg instances since they aren't
5717 # needed, and because of nested sets this is faster than doing
5718 # it with multiple digraph.root_nodes() calls below. This also
5719 # takes care of nested sets that have circular references,
5720 # which wouldn't be matched by digraph.root_nodes().
5721 for node in mygraph:
5722 if isinstance(node, DependencyArg):
5723 removed_nodes.add(node)
5725 mygraph.difference_update(removed_nodes)
5726 removed_nodes.clear()
5728 # Prune "nomerge" root nodes if nothing depends on them, since
5729 # otherwise they slow down merge order calculation. Don't remove
5730 # non-root nodes since they help optimize merge order in some cases
5731 # such as revdep-rebuild.
5734 for node in mygraph.root_nodes():
5735 if not isinstance(node, Package) or \
5736 node.installed or node.onlydeps:
5737 removed_nodes.add(node)
5739 self._spinner_update()
5740 mygraph.difference_update(removed_nodes)
5741 if not removed_nodes:
5743 removed_nodes.clear()
5744 self._merge_order_bias(mygraph)
5745 def cmp_circular_bias(n1, n2):
5747 RDEPEND is stronger than PDEPEND and this function
5748 measures such a strength bias within a circular
5749 dependency relationship.
5751 n1_n2_medium = n2 in mygraph.child_nodes(n1,
5752 ignore_priority=priority_range.ignore_medium_soft)
5753 n2_n1_medium = n1 in mygraph.child_nodes(n2,
5754 ignore_priority=priority_range.ignore_medium_soft)
5755 if n1_n2_medium == n2_n1_medium:
5760 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
5762 # Contains uninstall tasks that have been scheduled to
5763 # occur after overlapping blockers have been installed.
5764 scheduled_uninstalls = set()
5765 # Contains any Uninstall tasks that have been ignored
5766 # in order to avoid the circular deps code path. These
5767 # correspond to blocker conflicts that could not be
5769 ignored_uninstall_tasks = set()
5770 have_uninstall_task = False
5771 complete = "complete" in self._dynamic_config.myparams
5774 def get_nodes(**kwargs):
5776 Returns leaf nodes excluding Uninstall instances
5777 since those should be executed as late as possible.
5779 return [node for node in mygraph.leaf_nodes(**kwargs) \
5780 if isinstance(node, Package) and \
5781 (node.operation != "uninstall" or \
5782 node in scheduled_uninstalls)]
5784 # sys-apps/portage needs special treatment if ROOT="/"
5785 running_root = self._frozen_config._running_root.root
5786 runtime_deps = InternalPackageSet(
5787 initial_atoms=[PORTAGE_PACKAGE_ATOM])
5788 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
5789 PORTAGE_PACKAGE_ATOM)
5790 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
5791 PORTAGE_PACKAGE_ATOM)
5794 running_portage = running_portage[0]
5796 running_portage = None
5798 if replacement_portage:
5799 replacement_portage = replacement_portage[0]
5801 replacement_portage = None
5803 if replacement_portage == running_portage:
5804 replacement_portage = None
5806 if running_portage is not None:
5808 portage_rdepend = self._select_atoms_highest_available(
5809 running_root, running_portage._metadata["RDEPEND"],
5810 myuse=self._pkg_use_enabled(running_portage),
5811 parent=running_portage, strict=False)
5812 except portage.exception.InvalidDependString as e:
5813 portage.writemsg("!!! Invalid RDEPEND in " + \
5814 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
5815 (running_root, running_portage.cpv, e), noiselevel=-1)
5817 portage_rdepend = {running_portage : []}
5818 for atoms in portage_rdepend.values():
5819 runtime_deps.update(atom for atom in atoms \
5820 if not atom.blocker)
5822 # Merge libc asap, in order to account for implicit
5823 # dependencies. See bug #303567.
5824 implicit_libc_roots = (running_root,)
5825 for root in implicit_libc_roots:
5827 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5828 graphdb = self._dynamic_config.mydbapi[root]
5829 for atom in self._expand_virt_from_graph(root,
5830 portage.const.LIBC_PACKAGE_ATOM):
5833 match = graphdb.match_pkgs(atom)
5837 if pkg.operation == "merge" and \
5838 not vardb.cpv_exists(pkg.cpv):
5842 # If there's also an os-headers upgrade, we need to
5843 # pull that in first. See bug #328317.
5844 for atom in self._expand_virt_from_graph(root,
5845 portage.const.OS_HEADERS_PACKAGE_ATOM):
5848 match = graphdb.match_pkgs(atom)
5852 if pkg.operation == "merge" and \
5853 not vardb.cpv_exists(pkg.cpv):
5854 asap_nodes.append(pkg)
5856 asap_nodes.extend(libc_pkgs)
5858 def gather_deps(ignore_priority, mergeable_nodes,
5859 selected_nodes, node):
5861 Recursively gather a group of nodes that RDEPEND on
5862 eachother. This ensures that they are merged as a group
5863 and get their RDEPENDs satisfied as soon as possible.
5865 if node in selected_nodes:
5867 if node not in mergeable_nodes:
5869 if node == replacement_portage and \
5870 mygraph.child_nodes(node,
5871 ignore_priority=priority_range.ignore_medium_soft):
5872 # Make sure that portage always has all of it's
5873 # RDEPENDs installed first.
5875 selected_nodes.add(node)
5876 for child in mygraph.child_nodes(node,
5877 ignore_priority=ignore_priority):
5878 if not gather_deps(ignore_priority,
5879 mergeable_nodes, selected_nodes, child):
5883 def ignore_uninst_or_med(priority):
5884 if priority is BlockerDepPriority.instance:
5886 return priority_range.ignore_medium(priority)
5888 def ignore_uninst_or_med_soft(priority):
5889 if priority is BlockerDepPriority.instance:
5891 return priority_range.ignore_medium_soft(priority)
5893 tree_mode = "--tree" in self._frozen_config.myopts
5894 # Tracks whether or not the current iteration should prefer asap_nodes
5895 # if available. This is set to False when the previous iteration
5896 # failed to select any nodes. It is reset whenever nodes are
5897 # successfully selected.
5900 # Controls whether or not the current iteration should drop edges that
5901 # are "satisfied" by installed packages, in order to solve circular
5902 # dependencies. The deep runtime dependencies of installed packages are
5903 # not checked in this case (bug #199856), so it must be avoided
5904 # whenever possible.
5905 drop_satisfied = False
5907 # State of variables for successive iterations that loosen the
5908 # criteria for node selection.
5910 # iteration prefer_asap drop_satisfied
5915 # If no nodes are selected on the last iteration, it is due to
5916 # unresolved blockers or circular dependencies.
5919 self._spinner_update()
5920 selected_nodes = None
5921 ignore_priority = None
5922 if drop_satisfied or (prefer_asap and asap_nodes):
5923 priority_range = DepPrioritySatisfiedRange
5925 priority_range = DepPriorityNormalRange
5926 if prefer_asap and asap_nodes:
5927 # ASAP nodes are merged before their soft deps. Go ahead and
5928 # select root nodes here if necessary, since it's typical for
5929 # the parent to have been removed from the graph already.
5930 asap_nodes = [node for node in asap_nodes \
5931 if mygraph.contains(node)]
5932 for i in range(priority_range.SOFT,
5933 priority_range.MEDIUM_SOFT + 1):
5934 ignore_priority = priority_range.ignore_priority[i]
5935 for node in asap_nodes:
5936 if not mygraph.child_nodes(node,
5937 ignore_priority=ignore_priority):
5938 selected_nodes = [node]
5939 asap_nodes.remove(node)
5944 if not selected_nodes and \
5945 not (prefer_asap and asap_nodes):
5946 for i in range(priority_range.NONE,
5947 priority_range.MEDIUM_SOFT + 1):
5948 ignore_priority = priority_range.ignore_priority[i]
5949 nodes = get_nodes(ignore_priority=ignore_priority)
5951 # If there is a mixture of merges and uninstalls,
5952 # do the uninstalls first.
5953 good_uninstalls = None
5955 good_uninstalls = []
5957 if node.operation == "uninstall":
5958 good_uninstalls.append(node)
5961 nodes = good_uninstalls
5965 if good_uninstalls or len(nodes) == 1 or \
5966 (ignore_priority is None and \
5967 not asap_nodes and not tree_mode):
5968 # Greedily pop all of these nodes since no
5969 # relationship has been ignored. This optimization
5970 # destroys --tree output, so it's disabled in tree
5972 selected_nodes = nodes
5974 # For optimal merge order:
5975 # * Only pop one node.
5976 # * Removing a root node (node without a parent)
5977 # will not produce a leaf node, so avoid it.
5978 # * It's normal for a selected uninstall to be a
5979 # root node, so don't check them for parents.
5981 prefer_asap_parents = (True, False)
5983 prefer_asap_parents = (False,)
5984 for check_asap_parent in prefer_asap_parents:
5985 if check_asap_parent:
5987 parents = mygraph.parent_nodes(node,
5988 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5989 if any(x in asap_nodes for x in parents):
5990 selected_nodes = [node]
5994 if mygraph.parent_nodes(node):
5995 selected_nodes = [node]
6002 if not selected_nodes:
6003 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
6005 mergeable_nodes = set(nodes)
6006 if prefer_asap and asap_nodes:
6008 # When gathering the nodes belonging to a runtime cycle,
6009 # we want to minimize the number of nodes gathered, since
6010 # this tends to produce a more optimal merge order.
6011 # Ignoring all medium_soft deps serves this purpose.
6012 # In the case of multiple runtime cycles, where some cycles
6013 # may depend on smaller independent cycles, it's optimal
6014 # to merge smaller independent cycles before other cycles
6015 # that depend on them. Therefore, we search for the
6016 # smallest cycle in order to try and identify and prefer
6017 # these smaller independent cycles.
6018 ignore_priority = priority_range.ignore_medium_soft
6019 smallest_cycle = None
6021 if not mygraph.parent_nodes(node):
6023 selected_nodes = set()
6024 if gather_deps(ignore_priority,
6025 mergeable_nodes, selected_nodes, node):
6026 # When selecting asap_nodes, we need to ensure
6027 # that we haven't selected a large runtime cycle
6028 # that is obviously sub-optimal. This will be
6029 # obvious if any of the non-asap selected_nodes
6030 # is a leaf node when medium_soft deps are
6032 if prefer_asap and asap_nodes and \
6033 len(selected_nodes) > 1:
6034 for node in selected_nodes.difference(
6036 if not mygraph.child_nodes(node,
6038 DepPriorityNormalRange.ignore_medium_soft):
6039 selected_nodes = None
6042 if smallest_cycle is None or \
6043 len(selected_nodes) < len(smallest_cycle):
6044 smallest_cycle = selected_nodes
6046 selected_nodes = smallest_cycle
6048 if selected_nodes and debug:
6049 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
6050 (len(selected_nodes),), noiselevel=-1)
6051 cycle_digraph = mygraph.copy()
6052 cycle_digraph.difference_update([x for x in
6053 cycle_digraph if x not in selected_nodes])
6054 cycle_digraph.debug_print()
6055 writemsg("\n", noiselevel=-1)
6057 if prefer_asap and asap_nodes and not selected_nodes:
6058 # We failed to find any asap nodes to merge, so ignore
6059 # them for the next iteration.
6063 if selected_nodes and ignore_priority is not None:
6064 # Try to merge ignored medium_soft deps as soon as possible
6065 # if they're not satisfied by installed packages.
6066 for node in selected_nodes:
6067 children = set(mygraph.child_nodes(node))
6068 soft = children.difference(
6069 mygraph.child_nodes(node,
6070 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
6071 medium_soft = children.difference(
6072 mygraph.child_nodes(node,
6074 DepPrioritySatisfiedRange.ignore_medium_soft))
6075 medium_soft.difference_update(soft)
6076 for child in medium_soft:
6077 if child in selected_nodes:
6079 if child in asap_nodes:
6081 # Merge PDEPEND asap for bug #180045.
6082 asap_nodes.append(child)
6084 if selected_nodes and len(selected_nodes) > 1:
6085 if not isinstance(selected_nodes, list):
6086 selected_nodes = list(selected_nodes)
6087 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
6089 if not selected_nodes and myblocker_uninstalls:
6090 # An Uninstall task needs to be executed in order to
6091 # avoid conflict if possible.
6094 priority_range = DepPrioritySatisfiedRange
6096 priority_range = DepPriorityNormalRange
6098 mergeable_nodes = get_nodes(
6099 ignore_priority=ignore_uninst_or_med)
6101 min_parent_deps = None
6104 for task in myblocker_uninstalls.leaf_nodes():
6105 # Do some sanity checks so that system or world packages
6106 # don't get uninstalled inappropriately here (only really
6107 # necessary when --complete-graph has not been enabled).
6109 if task in ignored_uninstall_tasks:
6112 if task in scheduled_uninstalls:
6113 # It's been scheduled but it hasn't
6114 # been executed yet due to dependence
6115 # on installation of blocking packages.
6118 root_config = self._frozen_config.roots[task.root]
6119 inst_pkg = self._pkg(task.cpv, "installed", root_config,
6122 if self._dynamic_config.digraph.contains(inst_pkg):
6125 forbid_overlap = False
6126 heuristic_overlap = False
6127 for blocker in myblocker_uninstalls.parent_nodes(task):
6128 if not eapi_has_strong_blocks(blocker.eapi):
6129 heuristic_overlap = True
6130 elif blocker.atom.blocker.overlap.forbid:
6131 forbid_overlap = True
6133 if forbid_overlap and running_root == task.root:
6136 if heuristic_overlap and running_root == task.root:
6137 # Never uninstall sys-apps/portage or it's essential
6138 # dependencies, except through replacement.
6140 runtime_dep_atoms = \
6141 list(runtime_deps.iterAtomsForPackage(task))
6142 except portage.exception.InvalidDependString as e:
6143 portage.writemsg("!!! Invalid PROVIDE in " + \
6144 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6145 (task.root, task.cpv, e), noiselevel=-1)
6149 # Don't uninstall a runtime dep if it appears
6150 # to be the only suitable one installed.
6152 vardb = root_config.trees["vartree"].dbapi
6153 for atom in runtime_dep_atoms:
6154 other_version = None
6155 for pkg in vardb.match_pkgs(atom):
6156 if pkg.cpv == task.cpv and \
6157 pkg.counter == task.counter:
6161 if other_version is None:
6167 # For packages in the system set, don't take
6168 # any chances. If the conflict can't be resolved
6169 # by a normal replacement operation then abort.
6172 for atom in root_config.sets[
6173 "system"].iterAtomsForPackage(task):
6176 except portage.exception.InvalidDependString as e:
6177 portage.writemsg("!!! Invalid PROVIDE in " + \
6178 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6179 (task.root, task.cpv, e), noiselevel=-1)
6185 # Note that the world check isn't always
6186 # necessary since self._complete_graph() will
6187 # add all packages from the system and world sets to the
6188 # graph. This just allows unresolved conflicts to be
6189 # detected as early as possible, which makes it possible
6190 # to avoid calling self._complete_graph() when it is
6191 # unnecessary due to blockers triggering an abortion.
6193 # For packages in the world set, go ahead an uninstall
6194 # when necessary, as long as the atom will be satisfied
6195 # in the final state.
6196 graph_db = self._dynamic_config.mydbapi[task.root]
6199 for atom in root_config.sets[
6200 "selected"].iterAtomsForPackage(task):
6202 for pkg in graph_db.match_pkgs(atom):
6209 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
6211 except portage.exception.InvalidDependString as e:
6212 portage.writemsg("!!! Invalid PROVIDE in " + \
6213 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6214 (task.root, task.cpv, e), noiselevel=-1)
6220 # Check the deps of parent nodes to ensure that
6221 # the chosen task produces a leaf node. Maybe
6222 # this can be optimized some more to make the
6223 # best possible choice, but the current algorithm
6224 # is simple and should be near optimal for most
6226 self._spinner_update()
6227 mergeable_parent = False
6229 parent_deps.add(task)
6230 for parent in mygraph.parent_nodes(task):
6231 parent_deps.update(mygraph.child_nodes(parent,
6232 ignore_priority=priority_range.ignore_medium_soft))
6233 if min_parent_deps is not None and \
6234 len(parent_deps) >= min_parent_deps:
6235 # This task is no better than a previously selected
6236 # task, so abort search now in order to avoid wasting
6237 # any more cpu time on this task. This increases
6238 # performance dramatically in cases when there are
6239 # hundreds of blockers to solve, like when
6240 # upgrading to a new slot of kde-meta.
6241 mergeable_parent = None
6243 if parent in mergeable_nodes and \
6244 gather_deps(ignore_uninst_or_med_soft,
6245 mergeable_nodes, set(), parent):
6246 mergeable_parent = True
6248 if not mergeable_parent:
6251 if min_parent_deps is None or \
6252 len(parent_deps) < min_parent_deps:
6253 min_parent_deps = len(parent_deps)
6256 if uninst_task is not None and min_parent_deps == 1:
6257 # This is the best possible result, so so abort search
6258 # now in order to avoid wasting any more cpu time.
6261 if uninst_task is not None:
6262 # The uninstall is performed only after blocking
6263 # packages have been merged on top of it. File
6264 # collisions between blocking packages are detected
6265 # and removed from the list of files to be uninstalled.
6266 scheduled_uninstalls.add(uninst_task)
6267 parent_nodes = mygraph.parent_nodes(uninst_task)
6269 # Reverse the parent -> uninstall edges since we want
6270 # to do the uninstall after blocking packages have
6271 # been merged on top of it.
6272 mygraph.remove(uninst_task)
6273 for blocked_pkg in parent_nodes:
6274 mygraph.add(blocked_pkg, uninst_task,
6275 priority=BlockerDepPriority.instance)
6276 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6277 scheduler_graph.add(blocked_pkg, uninst_task,
6278 priority=BlockerDepPriority.instance)
6280 # Sometimes a merge node will render an uninstall
6281 # node unnecessary (due to occupying the same SLOT),
6282 # and we want to avoid executing a separate uninstall
6283 # task in that case.
6284 slot_node = self._dynamic_config.mydbapi[uninst_task.root
6285 ].match_pkgs(uninst_task.slot_atom)
6287 slot_node[0].operation == "merge":
6288 mygraph.add(slot_node[0], uninst_task,
6289 priority=BlockerDepPriority.instance)
6291 # Reset the state variables for leaf node selection and
6292 # continue trying to select leaf nodes.
6294 drop_satisfied = False
6297 if not selected_nodes:
6298 # Only select root nodes as a last resort. This case should
6299 # only trigger when the graph is nearly empty and the only
6300 # remaining nodes are isolated (no parents or children). Since
6301 # the nodes must be isolated, ignore_priority is not needed.
6302 selected_nodes = get_nodes()
6304 if not selected_nodes and not drop_satisfied:
6305 drop_satisfied = True
6308 if not selected_nodes and myblocker_uninstalls:
6309 # If possible, drop an uninstall task here in order to avoid
6310 # the circular deps code path. The corresponding blocker will
6311 # still be counted as an unresolved conflict.
6313 for node in myblocker_uninstalls.leaf_nodes():
6315 mygraph.remove(node)
6320 ignored_uninstall_tasks.add(node)
6323 if uninst_task is not None:
6324 # Reset the state variables for leaf node selection and
6325 # continue trying to select leaf nodes.
6327 drop_satisfied = False
6330 if not selected_nodes:
6331 self._dynamic_config._circular_deps_for_display = mygraph
6332 self._dynamic_config._skip_restart = True
6333 raise self._unknown_internal_error()
6335 # At this point, we've succeeded in selecting one or more nodes, so
6336 # reset state variables for leaf node selection.
6338 drop_satisfied = False
6340 mygraph.difference_update(selected_nodes)
6342 for node in selected_nodes:
6343 if isinstance(node, Package) and \
6344 node.operation == "nomerge":
6347 # Handle interactions between blockers
6348 # and uninstallation tasks.
6349 solved_blockers = set()
6351 if isinstance(node, Package) and \
6352 "uninstall" == node.operation:
6353 have_uninstall_task = True
6356 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
6357 inst_pkg = vardb.match_pkgs(node.slot_atom)
6359 # The package will be replaced by this one, so remove
6360 # the corresponding Uninstall task if necessary.
6361 inst_pkg = inst_pkg[0]
6362 uninst_task = Package(built=inst_pkg.built,
6363 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6364 metadata=inst_pkg._metadata,
6365 operation="uninstall",
6366 root_config=inst_pkg.root_config,
6367 type_name=inst_pkg.type_name)
6369 mygraph.remove(uninst_task)
6373 if uninst_task is not None and \
6374 uninst_task not in ignored_uninstall_tasks and \
6375 myblocker_uninstalls.contains(uninst_task):
6376 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6377 myblocker_uninstalls.remove(uninst_task)
6378 # Discard any blockers that this Uninstall solves.
6379 for blocker in blocker_nodes:
6380 if not myblocker_uninstalls.child_nodes(blocker):
6381 myblocker_uninstalls.remove(blocker)
6383 self._dynamic_config._unsolvable_blockers:
6384 solved_blockers.add(blocker)
6386 retlist.append(node)
6388 if (isinstance(node, Package) and \
6389 "uninstall" == node.operation) or \
6390 (uninst_task is not None and \
6391 uninst_task in scheduled_uninstalls):
6392 # Include satisfied blockers in the merge list
6393 # since the user might be interested and also
6394 # it serves as an indicator that blocking packages
6395 # will be temporarily installed simultaneously.
6396 for blocker in solved_blockers:
6397 retlist.append(blocker)
6399 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
6400 for node in myblocker_uninstalls.root_nodes():
6401 unsolvable_blockers.add(node)
6403 # If any Uninstall tasks need to be executed in order
6404 # to avoid a conflict, complete the graph with any
6405 # dependencies that may have been initially
6406 # neglected (to ensure that unsafe Uninstall tasks
6407 # are properly identified and blocked from execution).
6408 if have_uninstall_task and \
6410 not unsolvable_blockers:
6411 self._dynamic_config.myparams["complete"] = True
6412 if '--debug' in self._frozen_config.myopts:
6414 msg.append("enabling 'complete' depgraph mode " + \
6415 "due to uninstall task(s):")
6417 for node in retlist:
6418 if isinstance(node, Package) and \
6419 node.operation == 'uninstall':
6420 msg.append("\t%s" % (node,))
6421 writemsg_level("\n%s\n" % \
6422 "".join("%s\n" % line for line in msg),
6423 level=logging.DEBUG, noiselevel=-1)
6424 raise self._serialize_tasks_retry("")
6426 # Set satisfied state on blockers, but not before the
6427 # above retry path, since we don't want to modify the
6428 # state in that case.
6429 for node in retlist:
6430 if isinstance(node, Blocker):
6431 node.satisfied = True
6433 for blocker in unsolvable_blockers:
6434 retlist.append(blocker)
6436 if unsolvable_blockers and \
6437 not self._accept_blocker_conflicts():
6438 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
6439 self._dynamic_config._serialized_tasks_cache = retlist[:]
6440 self._dynamic_config._scheduler_graph = scheduler_graph
6441 self._dynamic_config._skip_restart = True
6442 raise self._unknown_internal_error()
6444 if self._dynamic_config._slot_collision_info and \
6445 not self._accept_blocker_conflicts():
6446 self._dynamic_config._serialized_tasks_cache = retlist[:]
6447 self._dynamic_config._scheduler_graph = scheduler_graph
6448 raise self._unknown_internal_error()
6450 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	"""
	Display the circular dependency cycle(s) contained in mygraph,
	followed by any USE-flag change suggestions produced by the
	circular dependency handler.

	NOTE(review): several control-flow lines (the `if suggestions:` /
	`else:` scaffolding) were reconstructed after extraction damage --
	verify against upstream.
	"""
	self._dynamic_config._circular_dependency_handler = \
		circular_dependency_handler(self, mygraph)
	handler = self._dynamic_config._circular_dependency_handler

	# Force verbose tree output so the cycle structure is visible.
	self._frozen_config.myopts.pop("--quiet", None)
	self._frozen_config.myopts["--verbose"] = True
	self._frozen_config.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(handler.merge_list)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
		noiselevel=-1)
	portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is None:
		handler.debug_print()
		portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is not None:
		portage.writemsg(handler.circular_dep_message, noiselevel=-1)

	suggestions = handler.suggestions
	if suggestions:
		writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
		if len(suggestions) == 1:
			writemsg("by applying the following change:\n", noiselevel=-1)
		else:
			writemsg("by applying " + colorize("bold", "any of") + \
				" the following changes:\n", noiselevel=-1)
		writemsg("".join(suggestions), noiselevel=-1)
		writemsg("\nNote that this change can be reverted, once the package has" + \
			" been installed.\n", noiselevel=-1)
		if handler.large_cycle_count:
			writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
				"Several changes might be required to resolve all cycles.\n" + \
				"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
	else:
		writemsg("\n\n", noiselevel=-1)
		writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Display the cached serialized task list, unless that exact list
	(or its reverse, which --tree mode produces) has already been
	displayed.
	"""
	if self._dynamic_config._serialized_tasks_cache is not None and \
		not (self._dynamic_config._displayed_list is not None and \
		(self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
		self._dynamic_config._displayed_list == \
		list(reversed(self._dynamic_config._serialized_tasks_cache)))):
		display_list = self._dynamic_config._serialized_tasks_cache[:]
		if "--tree" in self._frozen_config.myopts:
			# Tree output shows the merge list in reverse order.
			display_list.reverse()
		self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""
	Display the unresolved blockers together with the parent
	packages/atoms that pulled the conflicting packages in.

	NOTE(review): initializers and break/else lines in this body were
	reconstructed after extraction damage -- verify against upstream.
	"""
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for line in textwrap.wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Display the conflicting packages along with the packages
	# that pulled them in. This is helpful for troubleshooting
	# cases in which blockers don't solve automatically and
	# the reasons are not apparent from the normal merge list
	# display.
	conflict_pkgs = {}
	for blocker in blockers:
		for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
			self._dynamic_config._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
				if atom is not None:
					# Attribute world-set packages to @selected.
					parent_atoms = set([("@selected", atom)])
			if parent_atoms:
				conflict_pkgs[pkg] = parent_atoms

	if conflict_pkgs:
		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		pruned_pkgs = set()
		for pkg, parent_atoms in conflict_pkgs.items():
			relevant_parent = False
			for parent, atom in parent_atoms:
				if parent not in conflict_pkgs:
					relevant_parent = True
					break
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

	if conflict_pkgs:
		msg = []
		indent = "  "
		for pkg, parent_atoms in conflict_pkgs.items():
			# Prefer packages that are not directly involved in a conflict.
			# It can be essential to see all the packages here, so don't
			# omit any. If the list is long, people can simply use a pager.
			preferred_parents = set()
			for parent_atom in parent_atoms:
				parent, atom = parent_atom
				if parent not in conflict_pkgs:
					preferred_parents.add(parent_atom)

			ordered_list = list(preferred_parents)
			if len(parent_atoms) > len(ordered_list):
				for parent_atom in parent_atoms:
					if parent_atom not in preferred_parents:
						ordered_list.append(parent_atom)

			msg.append(indent + "%s pulled in by\n" % pkg)

			for parent_atom in ordered_list:
				parent, atom = parent_atom
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
				else:
					# Display the specific atom from SetArg or
					# Package types.
					msg.append("%s required by %s" % (atom, parent))
				msg.append("\n")

			msg.append("\n")

		writemsg("".join(msg), noiselevel=-1)

	if "--quiet" not in self._frozen_config.myopts:
		show_blocker_docs_link()
def display(self, mylist, favorites=[], verbosity=None):
	"""
	Display the given merge list and record it so that
	display_problems() will not redundantly show the same list again.
	"""
	# This is used to prevent display_problems() from
	# redundantly displaying this exact same merge list
	# again via _show_merge_list().
	self._dynamic_config._displayed_list = mylist
	# NOTE(review): the next line was reconstructed after extraction
	# damage; the original delegates to the resolver output Display
	# class -- verify against upstream.
	display = Display()

	return display(self, mylist, favorites, verbosity)
6605 def _display_autounmask(self):
6607 Display --autounmask message and optionally write it to config files
6608 (using CONFIG_PROTECT). The message includes the comments and the changes.
6611 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
6612 autounmask_unrestricted_atoms = \
6613 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
6614 quiet = "--quiet" in self._frozen_config.myopts
6615 pretend = "--pretend" in self._frozen_config.myopts
6616 ask = "--ask" in self._frozen_config.myopts
6617 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
def check_if_latest(pkg):
	"""
	Determine whether pkg is the highest available version overall
	and within its slot, across all configured package databases.

	@rtype: tuple
	@return: (is_latest, is_latest_in_slot) booleans
	NOTE(review): the version-comparison and break lines were
	reconstructed after extraction damage -- verify against upstream.
	"""
	is_latest = True
	is_latest_in_slot = True
	dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
	root_config = self._frozen_config.roots[pkg.root]

	for db, pkg_type, built, installed, db_keys in dbs:
		for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
			if other_pkg.cp != pkg.cp:
				# old-style PROVIDE virtual means there are no
				# normal matches for this pkg_type
				break
			if other_pkg > pkg:
				is_latest = False
				if other_pkg.slot_atom == pkg.slot_atom:
					is_latest_in_slot = False
					break
			else:
				# iter_match_pkgs yields highest version first, so
				# there's no need to search this pkg_type any further
				break

		if not is_latest_in_slot:
			break

	return is_latest, is_latest_in_slot
6646 #Set of roots we have autounmask changes for.
6649 masked_by_missing_keywords = False
6650 unstable_keyword_msg = {}
6651 for pkg in self._dynamic_config._needed_unstable_keywords:
6652 self._show_merge_list()
6653 if pkg in self._dynamic_config.digraph:
6656 unstable_keyword_msg.setdefault(root, [])
6657 is_latest, is_latest_in_slot = check_if_latest(pkg)
6658 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6659 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6660 use=self._pkg_use_enabled(pkg))
6661 for reason in mreasons:
6662 if reason.unmask_hint and \
6663 reason.unmask_hint.key == 'unstable keyword':
6664 keyword = reason.unmask_hint.value
6666 masked_by_missing_keywords = True
6668 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
6669 if autounmask_unrestricted_atoms:
6671 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
6672 elif is_latest_in_slot:
6673 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
6675 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6677 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6679 p_mask_change_msg = {}
6680 for pkg in self._dynamic_config._needed_p_mask_changes:
6681 self._show_merge_list()
6682 if pkg in self._dynamic_config.digraph:
6685 p_mask_change_msg.setdefault(root, [])
6686 is_latest, is_latest_in_slot = check_if_latest(pkg)
6687 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6688 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6689 use=self._pkg_use_enabled(pkg))
6690 for reason in mreasons:
6691 if reason.unmask_hint and \
6692 reason.unmask_hint.key == 'p_mask':
6693 keyword = reason.unmask_hint.value
6695 comment, filename = portage.getmaskingreason(
6696 pkg.cpv, metadata=pkg._metadata,
6697 settings=pkgsettings,
6698 portdb=pkg.root_config.trees["porttree"].dbapi,
6699 return_location=True)
6701 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
6703 p_mask_change_msg[root].append("# %s:\n" % filename)
6705 comment = [line for line in
6706 comment.splitlines() if line]
6707 for line in comment:
6708 p_mask_change_msg[root].append("%s\n" % line)
6709 if autounmask_unrestricted_atoms:
6711 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
6712 elif is_latest_in_slot:
6713 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
6715 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6717 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6719 use_changes_msg = {}
6720 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
6721 self._show_merge_list()
6722 if pkg in self._dynamic_config.digraph:
6725 use_changes_msg.setdefault(root, [])
6726 is_latest, is_latest_in_slot = check_if_latest(pkg)
6727 changes = needed_use_config_change[1]
6729 for flag, state in changes.items():
6731 adjustments.append(flag)
6733 adjustments.append("-" + flag)
6734 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
6736 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6737 elif is_latest_in_slot:
6738 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
6740 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6743 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
6744 self._show_merge_list()
6745 if pkg in self._dynamic_config.digraph:
6748 license_msg.setdefault(root, [])
6749 is_latest, is_latest_in_slot = check_if_latest(pkg)
6751 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
6753 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6754 elif is_latest_in_slot:
6755 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
6757 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
def find_config_file(abs_user_config, file_name):
	"""
	Searches /etc/portage for an appropriate file to append changes to.
	If the file_name is a file it is returned, if it is a directory, the
	last file in it is returned. Order of traversal is the identical to
	portage.util.grablines(recursive=True).

	file_name - String containing a file name like "package.use"
	return value - String. Absolute path of file to write to. None if
	no suitable file exists.

	NOTE(review): the lstat try-block and directory walker were
	reconstructed after extraction damage -- verify against upstream.
	"""
	file_path = os.path.join(abs_user_config, file_name)

	try:
		os.lstat(file_path)
	except OSError as e:
		if e.errno == errno.ENOENT:
			# The file doesn't exist, so we'll
			# simply create it.
			return file_path

		# Disk or file system trouble?
		return None

	last_file_path = None
	stack = [file_path]
	while stack:
		p = stack.pop()
		try:
			st = os.stat(p)
		except OSError:
			pass
		else:
			if stat.S_ISREG(st.st_mode):
				last_file_path = p
			elif stat.S_ISDIR(st.st_mode):
				if os.path.basename(p) in _ignorecvs_dirs:
					continue
				try:
					contents = os.listdir(p)
				except OSError:
					pass
				else:
					contents.sort(reverse=True)
					for child in contents:
						if child.startswith(".") or \
							child.endswith("~"):
							continue
						stack.append(os.path.join(p, child))

	return last_file_path
6811 write_to_file = autounmask_write and not pretend
6812 #Make sure we have a file to write to before doing any write.
6813 file_to_write_to = {}
6817 settings = self._frozen_config.roots[root].settings
6818 abs_user_config = os.path.join(
6819 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6821 if root in unstable_keyword_msg:
6822 if not os.path.exists(os.path.join(abs_user_config,
6823 "package.keywords")):
6824 filename = "package.accept_keywords"
6826 filename = "package.keywords"
6827 file_to_write_to[(abs_user_config, "package.keywords")] = \
6828 find_config_file(abs_user_config, filename)
6830 if root in p_mask_change_msg:
6831 file_to_write_to[(abs_user_config, "package.unmask")] = \
6832 find_config_file(abs_user_config, "package.unmask")
6834 if root in use_changes_msg:
6835 file_to_write_to[(abs_user_config, "package.use")] = \
6836 find_config_file(abs_user_config, "package.use")
6838 if root in license_msg:
6839 file_to_write_to[(abs_user_config, "package.license")] = \
6840 find_config_file(abs_user_config, "package.license")
6842 for (abs_user_config, f), path in file_to_write_to.items():
6844 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
6846 write_to_file = not problems
def format_msg(lines):
	"""
	Return the given config-file lines joined into one string, with
	non-comment lines colorized for terminal display.
	"""
	# Work on a copy so the caller's list is not mutated.
	# NOTE(review): the copy and `continue` lines were reconstructed
	# after extraction damage -- verify against upstream.
	lines = lines[:]
	for i, line in enumerate(lines):
		if line.startswith("#"):
			continue
		lines[i] = colorize("INFORM", line.rstrip()) + "\n"
	return "".join(lines)
6857 settings = self._frozen_config.roots[root].settings
6858 abs_user_config = os.path.join(
6859 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6862 writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
def _writemsg(reason, file):
	# Print a header explaining which kind of config changes are
	# needed and which portage(5) section documents them.
	writemsg(('\nThe following %s are necessary to proceed:\n'
		' (see "%s" in the portage(5) man page for more details)\n')
		% (colorize('BAD', reason), file), noiselevel=-1)
6869 if root in unstable_keyword_msg:
6870 _writemsg('keyword changes', 'package.accept_keywords')
6871 writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
6873 if root in p_mask_change_msg:
6874 _writemsg('mask changes', 'package.unmask')
6875 writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
6877 if root in use_changes_msg:
6878 _writemsg('USE changes', 'package.use')
6879 writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
6881 if root in license_msg:
6882 _writemsg('license changes', 'package.license')
6883 writemsg(format_msg(license_msg[root]), noiselevel=-1)
6888 settings = self._frozen_config.roots[root].settings
6889 protect_obj[root] = ConfigProtect(settings["EROOT"], \
6890 shlex_split(settings.get("CONFIG_PROTECT", "")),
6891 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
def write_changes(root, changes, file_to_write_to):
	"""
	Append the given change lines to file_to_write_to, honoring
	CONFIG_PROTECT for the given root. Read/write failures are
	recorded in the enclosing-scope `problems` list instead of
	raising.

	NOTE(review): the io.open try-block and force=True argument were
	reconstructed after extraction damage -- verify against upstream.
	"""
	file_contents = None
	try:
		with io.open(
			_unicode_encode(file_to_write_to,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'],
			errors='replace') as f:
			file_contents = f.readlines()
	except IOError as e:
		if e.errno == errno.ENOENT:
			# Missing file: start from empty contents.
			file_contents = []
		else:
			problems.append("!!! Failed to read '%s': %s\n" % \
				(file_to_write_to, e))
	if file_contents is not None:
		file_contents.extend(changes)
		if protect_obj[root].isprotected(file_to_write_to):
			# We want to force new_protect_filename to ensure
			# that the user will see all our changes via
			# dispatch-conf, even if file_to_write_to doesn't
			# exist yet, so we specify force=True.
			file_to_write_to = new_protect_filename(file_to_write_to,
				force=True)
		try:
			write_atomic(file_to_write_to, "".join(file_contents))
		except PortageException:
			problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
6922 if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
6925 "NOTE: The --autounmask-keep-masks option will prevent emerge",
6926 " from creating package.unmask or ** keyword changes."
6930 line = colorize("INFORM", line)
6931 writemsg(line + "\n", noiselevel=-1)
6933 if ask and write_to_file and file_to_write_to:
6934 prompt = "\nWould you like to add these " + \
6935 "changes to your config files?"
6936 if userquery(prompt, enter_invalid) == 'No':
6937 write_to_file = False
6939 if write_to_file and file_to_write_to:
6941 settings = self._frozen_config.roots[root].settings
6942 abs_user_config = os.path.join(
6943 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6944 ensure_dirs(abs_user_config)
6946 if root in unstable_keyword_msg:
6947 write_changes(root, unstable_keyword_msg[root],
6948 file_to_write_to.get((abs_user_config, "package.keywords")))
6950 if root in p_mask_change_msg:
6951 write_changes(root, p_mask_change_msg[root],
6952 file_to_write_to.get((abs_user_config, "package.unmask")))
6954 if root in use_changes_msg:
6955 write_changes(root, use_changes_msg[root],
6956 file_to_write_to.get((abs_user_config, "package.use")))
6958 if root in license_msg:
6959 write_changes(root, license_msg[root],
6960 file_to_write_to.get((abs_user_config, "package.license")))
6963 writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
6965 writemsg("".join(problems), noiselevel=-1)
6966 elif write_to_file and roots:
6967 writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
6969 elif not pretend and not autounmask_write and roots:
6970 writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
6971 "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
6972 "paying special attention to mask or keyword changes that may expose\n"
6973 "experimental or unstable packages.\n",
6977 def display_problems(self):
"""
6979 Display problems with the dependency graph such as slot collisions.
6980 This is called internally by display() to show the problems _after_
6981 the merge list where it is most likely to be seen, but if display()
6982 is not going to be called then this method should be called explicitly
6983 to ensure that the user is notified of problems with the graph.
"""
# Circular dependency report comes first, when one was recorded.
6986 if self._dynamic_config._circular_deps_for_display is not None:
6987 self._show_circular_deps(
6988 self._dynamic_config._circular_deps_for_display)
6990 # The slot conflict display has better noise reduction than
6991 # the unsatisfied blockers display, so skip unsatisfied blockers
6992 # display if there are slot conflicts (see bug #385391).
6993 if self._dynamic_config._slot_collision_info:
6994 self._show_slot_collision_notice()
6995 elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
6996 self._show_unsatisfied_blockers(
6997 self._dynamic_config._unsatisfied_blockers_for_display)
6999 self._show_missed_update()
7001 self._show_ignored_binaries()
7003 self._display_autounmask()
7005 # TODO: Add generic support for "set problem" handlers so that
7006 # the below warnings aren't special cases for world only.
7008 if self._dynamic_config._missing_args:
7009 world_problems = False
7010 if "world" in self._dynamic_config.sets[
7011 self._frozen_config.target_root].sets:
7012 # Filter out indirect members of world (from nested sets)
7013 # since only direct members of world are desired here.
7014 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
7015 for arg, atom in self._dynamic_config._missing_args:
7016 if arg.name in ("selected", "world") and atom in world_set:
7017 world_problems = True
# Advise repairing the world file when direct world members are missing.
7021 sys.stderr.write("\n!!! Problems have been " + \
7022 "detected with your world file\n")
7023 sys.stderr.write("!!! Please run " + \
7024 green("emaint --check world")+"\n\n")
7026 if self._dynamic_config._missing_args:
7027 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7028 " Ebuilds for the following packages are either all\n")
7029 sys.stderr.write(colorize("BAD", "!!!") + \
7030 " masked or don't exist:\n")
7031 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7032 self._dynamic_config._missing_args) + "\n")
7034 if self._dynamic_config._pprovided_args:
# Group package.provided arguments by (arg, atom) and collect the
# set names that pulled each one in.
7036 for arg, atom in self._dynamic_config._pprovided_args:
7037 if isinstance(arg, SetArg):
7039 arg_atom = (atom, atom)
7042 arg_atom = (arg.arg, atom)
7043 refs = arg_refs.setdefault(arg_atom, [])
7044 if parent not in refs:
7047 msg.append(bad("\nWARNING: "))
7048 if len(self._dynamic_config._pprovided_args) > 1:
7049 msg.append("Requested packages will not be " + \
7050 "merged because they are listed in\n")
7052 msg.append("A requested package will not be " + \
7053 "merged because it is listed in\n")
7054 msg.append("package.provided:\n\n")
7055 problems_sets = set()
7056 for (arg, atom), refs in arg_refs.items():
7059 problems_sets.update(refs)
7061 ref_string = ", ".join(["'%s'" % name for name in refs])
7062 ref_string = " pulled in by " + ref_string
7063 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7065 if "selected" in problems_sets or "world" in problems_sets:
7066 msg.append("This problem can be solved in one of the following ways:\n\n")
7067 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
7068 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
7069 msg.append("  C) Remove offending entries from package.provided.\n\n")
7070 msg.append("The best course of action depends on the reason that an offending\n")
7071 msg.append("package.provided entry exists.\n\n")
7072 sys.stderr.write("".join(msg))
# Report updates that are masked only because of LICENSE changes.
7074 masked_packages = []
7075 for pkg in self._dynamic_config._masked_license_updates:
7076 root_config = pkg.root_config
7077 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
7078 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
7079 masked_packages.append((root_config, pkgsettings,
7080 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7082 writemsg("\n" + colorize("BAD", "!!!") + \
7083 " The following updates are masked by LICENSE changes:\n",
7085 show_masked_packages(masked_packages)
7087 writemsg("\n", noiselevel=-1)
# Report installed packages that are masked.
7089 masked_packages = []
7090 for pkg in self._dynamic_config._masked_installed:
7091 root_config = pkg.root_config
7092 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# NOTE(review): this passes the bound method itself as use=..., while
# the LICENSE-update loop above passes _pkg_use_enabled(pkg) — looks
# like a bug; confirm against upstream before changing.
7093 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
7094 masked_packages.append((root_config, pkgsettings,
7095 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7097 writemsg("\n" + colorize("BAD", "!!!") + \
7098 " The following installed packages are masked:\n",
7100 show_masked_packages(masked_packages)
7102 writemsg("\n", noiselevel=-1)
# Finally, replay any stored unsatisfied-dependency displays.
7104 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7105 self._show_unsatisfied_dep(*pargs,
7106 **portage._native_kwargs(kwargs))
7108 def saveNomergeFavorites(self):
7109 """Find atoms in favorites that are not in the mergelist and add them
7110 to the world file if necessary."""
# Modes in which the world file must not be modified.
7111 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7112 "--oneshot", "--onlydeps", "--pretend"):
7113 if x in self._frozen_config.myopts:
7115 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7116 world_set = root_config.sets["selected"]
7118 world_locked = False
7119 if hasattr(world_set, "lock"):
7123 if hasattr(world_set, "load"):
7124 world_set.load() # maybe it's changed on disk
7126 args_set = self._dynamic_config.sets[
7127 self._frozen_config.target_root].sets['__non_set_args__']
7128 added_favorites = set()
# Collect world-file candidates from "nomerge" nodes on this root.
7129 for x in self._dynamic_config._set_nodes:
7130 if x.operation != "nomerge":
7133 if x.root != root_config.root:
7137 myfavkey = create_world_atom(x, args_set, root_config)
7139 if myfavkey in added_favorites:
7141 added_favorites.add(myfavkey)
7142 except portage.exception.InvalidDependString as e:
7143 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7144 (x.cpv, e), noiselevel=-1)
7145 writemsg("!!! see '%s'\n\n" % os.path.join(
7146 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
# Requested sets that qualify as world candidates are recorded with
# their SETPREFIX name; plain atoms follow.
7149 for arg in self._dynamic_config._initial_arg_list:
7150 if not isinstance(arg, SetArg):
7152 if arg.root_config.root != root_config.root:
7158 if k in ("selected", "world") or \
7159 not root_config.sets[k].world_candidate:
7164 all_added.append(SETPREFIX + k)
7165 all_added.extend(added_favorites)
7169 if "--ask" in self._frozen_config.myopts:
7170 writemsg_stdout("\n", noiselevel=-1)
7172 writemsg_stdout("  %s %s\n" % (colorize("GOOD", "*"), a),
7174 writemsg_stdout("\n", noiselevel=-1)
7175 prompt = "Would you like to add these packages to your world " \
7177 enter_invalid = '--ask-enter-invalid' in \
7178 self._frozen_config.myopts
7179 if userquery(prompt, enter_invalid) == "No":
7184 if a.startswith(SETPREFIX):
7185 filename = "world_sets"
7189 ">>> Recording %s in \"%s\" favorites file...\n" %
7190 (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
7191 world_set.update(all_added)
# NOTE(review): world_locked is set when world_set supports lock(), so
# a matching unlock in a finally clause is expected — confirm.
7196 def _loadResumeCommand(self, resume_data, skip_masked=True,
"""
7199 Add a resume command to the graph and validate it in the process.  This
7200 will raise a PackageNotFound exception if a package is not available.
"""
# Validate the structure of the resume data before using it.
7205 if not isinstance(resume_data, dict):
7208 mergelist = resume_data.get("mergelist")
7209 if not isinstance(mergelist, list):
7212 favorites = resume_data.get("favorites")
7213 if isinstance(favorites, list):
7214 args = self._load_favorites(favorites)
7218 fakedb = self._dynamic_config.mydbapi
7219 serialized_tasks = []
# Each mergelist entry is a 4-item list: [pkg_type, root, cpv, action].
7222 if not (isinstance(x, list) and len(x) == 4):
7224 pkg_type, myroot, pkg_key, action = x
7225 if pkg_type not in self.pkg_tree_map:
7227 if action != "merge":
7229 root_config = self._frozen_config.roots[myroot]
7231 # Use the resume "favorites" list to see if a repo was specified
7233 depgraph_sets = self._dynamic_config.sets[root_config.root]
7235 for atom in depgraph_sets.atoms.getAtoms():
7236 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
7240 atom = "=" + pkg_key
7242 atom = atom + _repo_separator + repo
7245 atom = Atom(atom, allow_repo=True)
7250 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
7251 if not self._pkg_visibility_check(pkg) or \
7252 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
7253 modified_use=self._pkg_use_enabled(pkg)):
7258 # It does not exist or it is corrupt.
7260 # TODO: log these somewhere
7262 raise portage.exception.PackageNotFound(pkg_key)
7264 if "merge" == pkg.operation and \
7265 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
7266 modified_use=self._pkg_use_enabled(pkg)):
7269 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
7271 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
7273 self._dynamic_config._unsatisfied_deps_for_display.append(
7274 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
7276 fakedb[myroot].cpv_inject(pkg)
7277 serialized_tasks.append(pkg)
7278 self._spinner_update()
7280 if self._dynamic_config._unsatisfied_deps_for_display:
7283 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
7284 self._dynamic_config._serialized_tasks_cache = serialized_tasks
7285 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
7287 self._select_package = self._select_pkg_from_graph
7288 self._dynamic_config.myparams["selective"] = True
7289 # Always traverse deep dependencies in order to account for
7290 # potentially unsatisfied dependencies of installed packages.
7291 # This is necessary for correct --keep-going or --resume operation
7292 # in case a package from a group of circularly dependent packages
7293 # fails. In this case, a package which has recently been installed
7294 # may have an unsatisfied circular dependency (pulled in by
7295 # PDEPEND, for example). So, even though a package is already
7296 # installed, it may not have all of its dependencies satisfied, so
7297 # it may not be usable. If such a package is in the subgraph of
7298 # deep dependencies of a scheduled build, that build needs to
7299 # be cancelled. In order for this type of situation to be
7300 # recognized, deep traversal of dependencies is required.
7301 self._dynamic_config.myparams["deep"] = True
7303 for task in serialized_tasks:
7304 if isinstance(task, Package) and \
7305 task.operation == "merge":
7306 if not self._add_pkg(task, None):
7309 # Packages for argument atoms need to be explicitly
7310 # added via _add_pkg() so that they are included in the
7311 # digraph (needed at least for --tree display).
7312 for arg in self._expand_set_args(args, add_to_digraph=True):
7313 for atom in arg.pset.getAtoms():
7314 pkg, existing_node = self._select_package(
7315 arg.root_config.root, atom)
7316 if existing_node is None and \
7318 if not self._add_pkg(pkg, Dependency(atom=atom,
7319 root=pkg.root, parent=arg)):
7322 # Allow unsatisfied deps here to avoid showing a masking
7323 # message for an unsatisfied dep that isn't necessarily
7325 if not self._create_graph(allow_unsatisfied=True):
# Split unsatisfied deps: those with a to-be-merged parent count
# immediately; installed parents are handled below.
7328 unsatisfied_deps = []
7329 for dep in self._dynamic_config._unsatisfied_deps:
7330 if not isinstance(dep.parent, Package):
7332 if dep.parent.operation == "merge":
7333 unsatisfied_deps.append(dep)
7336 # For unsatisfied deps of installed packages, only account for
7337 # them if they are in the subgraph of dependencies of a package
7338 # which is scheduled to be installed.
7339 unsatisfied_install = False
7341 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
7343 node = dep_stack.pop()
7344 if not isinstance(node, Package):
7346 if node.operation == "merge":
7347 unsatisfied_install = True
7349 if node in traversed:
7352 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
7354 if unsatisfied_install:
7355 unsatisfied_deps.append(dep)
7357 if masked_tasks or unsatisfied_deps:
7358 # This probably means that a required package
7359 # was dropped via --skipfirst. It makes the
7360 # resume list invalid, so convert it to a
7361 # UnsatisfiedResumeDep exception.
7362 raise self.UnsatisfiedResumeDep(self,
7363 masked_tasks + unsatisfied_deps)
7364 self._dynamic_config._serialized_tasks_cache = None
7367 except self._unknown_internal_error:
7372 def _load_favorites(self, favorites):
"""
7374 Use a list of favorites to resume state from a
7375 previous select_files() call. This creates similar
7376 DependencyArg instances to those that would have
7377 been created by the original select_files() call.
7378 This allows Package instances to be matched with
7379 DependencyArg instances during graph creation.
"""
7381 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7382 sets = root_config.sets
7383 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Skip non-string entries and the implicit system/world names.
7386 if not isinstance(x, basestring):
7388 if x in ("system", "world"):
# SETPREFIX-prefixed favorites are restored as SetArg instances.
7390 if x.startswith(SETPREFIX):
7391 s = x[len(SETPREFIX):]
7394 if s in depgraph_sets.sets:
7397 depgraph_sets.sets[s] = pset
7398 args.append(SetArg(arg=x, pset=pset,
7399 root_config=root_config))
# Everything else is treated as a plain atom argument.
7402 x = Atom(x, allow_repo=True)
7403 except portage.exception.InvalidAtom:
7405 args.append(AtomArg(arg=x, atom=x,
7406 root_config=root_config))
7408 self._set_args(args)
# NOTE(review): the caller (_loadResumeCommand) consumes the returned
# args list, so a final "return args" is expected here — confirm.
7411 class UnsatisfiedResumeDep(portage.exception.PortageException):
"""
7413 A dependency of a resume list is not installed. This
7414 can occur when a required package is dropped from the
7415 merge list via --skipfirst.
"""
7417 def __init__(self, depgraph, value):
# Keep a reference to the depgraph so callers can inspect the
# graph state that produced this exception.
7418 portage.exception.PortageException.__init__(self, value)
7419 self.depgraph = depgraph
7421 class _internal_exception(portage.exception.PortageException):
"""Base class for exceptions used internally by the depgraph."""
7422 def __init__(self, value=""):
# Explicit base-class call (not super()): this class appears to be
# nested inside the depgraph class, so its own name would not
# resolve at method-call time.
7423 portage.exception.PortageException.__init__(self, value)
7425 class _unknown_internal_error(_internal_exception):
"""
7427 Used by the depgraph internally to terminate graph creation.
7428 The specific reason for the failure should have been dumped
7429 to stderr, unfortunately, the exact reason for the failure
is not known here.
"""
7433 class _serialize_tasks_retry(_internal_exception):
"""
7435 This is raised by the _serialize_tasks() method when it needs to
7436 be called again for some reason. The only case that it's currently
7437 used for is when neglected dependencies need to be added to the
7438 graph in order to avoid making a potentially unsafe decision.
"""
7441 class _backtrack_mask(_internal_exception):
"""
7443 This is raised by _show_unsatisfied_dep() when it's called with
7444 check_backtrack=True and a matching package has been masked by
backtracking.
"""
7448 class _autounmask_breakage(_internal_exception):
"""
7450 This is raised by _show_unsatisfied_dep() when it's called with
7451 check_autounmask_breakage=True and a matching package has
7452 been disqualified due to autounmask changes.
"""
def need_restart(self):
	"""
	Return True when backtracking signalled that a restart is needed
	and restarting has not been suppressed.
	"""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_restart and not dynamic_config._skip_restart
def success_without_autounmask(self):
	"""Return the flag recording a success that required no autounmask changes."""
	dynamic_config = self._dynamic_config
	return dynamic_config._success_without_autounmask
7462 def autounmask_breakage_detected(self):
# Replay the stored unsatisfied-dep displays in check mode; with
# check_autounmask_breakage=True the display helper raises
# _autounmask_breakage (caught below) when an autounmask change
# disqualified a matching package.
7464 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7465 self._show_unsatisfied_dep(
7466 *pargs, check_autounmask_breakage=True,
7467 **portage._native_kwargs(kwargs))
7468 except self._autounmask_breakage:
def get_backtrack_infos(self):
	"""Expose the backtracking data accumulated in the dynamic config."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
7476 class _dep_check_composite_db(dbapi):
"""
7478 A dbapi-like interface that is optimized for use in dep_check() calls.
7479 This is built on top of the existing depgraph package selection logic.
7480 Some packages that have been added to the graph may be masked from this
7481 view in order to influence the atom preference selection that occurs
in dep_check().
"""
7484 def __init__(self, depgraph, root):
7485 dbapi.__init__(self)
7486 self._depgraph = depgraph
# NOTE(review): self._root is read throughout this class but its
# assignment is not visible here — confirm __init__ stores root.
7488 self._match_cache = {}
7489 self._cpv_pkg_map = {}
7491 def _clear_cache(self):
# Drop both caches; match() repopulates them lazily.
7492 self._match_cache.clear()
7493 self._cpv_pkg_map.clear()
7495 def cp_list(self, cp):
"""
7497 Emulate cp_list just so it can be used to check for existence
7498 of new-style virtuals. Since it's a waste of time to return
7499 more than one cpv for this use case, a maximum of one cpv will
be returned.
"""
7502 if isinstance(cp, Atom):
7507 for pkg in self._depgraph._iter_match_pkgs_any(
7508 self._depgraph._frozen_config.roots[self._root], atom):
7515 def match(self, atom):
# Cache on both the evaluated and unevaluated atom forms.
7516 cache_key = (atom, atom.unevaluated_atom)
7517 ret = self._match_cache.get(cache_key)
7522 pkg, existing = self._depgraph._select_package(self._root, atom)
7524 if pkg is not None and self._visible(pkg):
7525 self._cpv_pkg_map[pkg.cpv] = pkg
7528 if pkg is not None and \
7529 atom.slot is None and \
7530 pkg.cp.startswith("virtual/") and \
7531 (("remove" not in self._depgraph._dynamic_config.myparams and
7532 "--update" not in self._depgraph._frozen_config.myopts) or
7534 not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
7535 # For new-style virtual lookahead that occurs inside dep_check()
7536 # for bug #141118, examine all slots. This is needed so that newer
7537 # slots will not unnecessarily be pulled in when a satisfying lower
7538 # slot is already installed. For example, if virtual/jdk-1.5 is
7539 # satisfied via gcj-jdk then there's no need to pull in a newer
7540 # slot to satisfy a virtual/jdk dependency, unless --update is
7544 for virt_pkg in self._depgraph._iter_match_pkgs_any(
7545 self._depgraph._frozen_config.roots[self._root], atom):
7546 if virt_pkg.cp != pkg.cp:
7548 slots.add(virt_pkg.slot)
# The chosen pkg's own slot is removed; the remaining slots are
# probed one at a time via a slot-qualified atom.
7550 slots.remove(pkg.slot)
7552 slot_atom = atom.with_slot(slots.pop())
7553 pkg, existing = self._depgraph._select_package(
7554 self._root, slot_atom)
7557 if not self._visible(pkg):
7559 self._cpv_pkg_map[pkg.cpv] = pkg
7563 self._cpv_sort_ascending(ret)
7565 self._match_cache[cache_key] = ret
7568 def _visible(self, pkg):
# Installed packages must still be wanted by the depgraph to be
# visible through this composite view.
7569 if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
7571 if pkg.installed and \
7572 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
7573 # Account for packages with masks (like KEYWORDS masks)
7574 # that are usually ignored in visibility checks for
7575 # installed packages, in order to handle cases like
7577 myopts = self._depgraph._frozen_config.myopts
7578 use_ebuild_visibility = myopts.get(
7579 '--use-ebuild-visibility', 'n') != 'n'
7580 avoid_update = "--update" not in myopts and \
7581 "remove" not in self._depgraph._dynamic_config.myparams
7582 usepkgonly = "--usepkgonly" in myopts
7583 if not avoid_update:
7584 if not use_ebuild_visibility and usepkgonly:
7586 elif not self._depgraph._equiv_ebuild_visible(pkg):
7589 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
7590 self._root].get(pkg.slot_atom)
7591 if in_graph is None:
7592 # Mask choices for packages which are not the highest visible
7593 # version within their slot (since they usually trigger slot
7595 highest_visible, in_graph = self._depgraph._select_package(
7596 self._root, pkg.slot_atom)
7597 # Note: highest_visible is not necessarily the real highest
7598 # visible, especially when --update is not enabled, so use
7599 # < operator instead of !=.
7600 if highest_visible is not None and pkg < highest_visible:
7602 elif in_graph != pkg:
7603 # Mask choices for packages that would trigger a slot
7604 # conflict with a previously selected package.
7608 def aux_get(self, cpv, wants):
# Serve metadata from packages recorded by match(); missing keys
# default to the empty string.
7609 metadata = self._cpv_pkg_map[cpv]._metadata
7610 return [metadata.get(x, "") for x in wants]
7612 def match_pkgs(self, atom):
# Map the cpvs returned by match() back to Package instances.
7613 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
7615 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
"""Tell the user that the short name *arg* matched several packages."""
# Quiet mode: just list the fully-qualified candidates.
7617 if "--quiet" in myopts:
7618 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7619 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
7620 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7621 writemsg("    " + colorize("INFORM", cp) + "\n", noiselevel=-1)
# Verbose mode: run a search so each candidate is shown with its
# description.
7624 s = search(root_config, spinner, "--searchdesc" in myopts,
7625 "--quiet" not in myopts, "--usepkg" in myopts,
7626 "--usepkgonly" in myopts)
7627 null_cp = portage.dep_getkey(insert_category_into_atom(
7629 cat, atom_pn = portage.catsplit(null_cp)
7630 s.searchkey = atom_pn
7631 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7634 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7635 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
7637 def _spinner_start(spinner, myopts):
"""Print the pre-calculation banner and configure the spinner."""
7640 if "--quiet" not in myopts and \
7641 ("--pretend" in myopts or "--ask" in myopts or \
7642 "--tree" in myopts or "--verbose" in myopts):
# Pick the verb used in the banner according to the action.
7644 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7646 elif "--buildpkgonly" in myopts:
7650 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
7651 if "--unordered-display" in myopts:
7652 portage.writemsg_stdout("\n" + \
7653 darkgreen("These are the packages that " + \
7654 "would be %s:" % action) + "\n\n")
7656 portage.writemsg_stdout("\n" + \
7657 darkgreen("These are the packages that " + \
7658 "would be %s, in reverse order:" % action) + "\n\n")
7660 portage.writemsg_stdout("\n" + \
7661 darkgreen("These are the packages that " + \
7662 "would be %s, in order:" % action) + "\n\n")
7664 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
7665 if not show_spinner:
# Silence the spinner instead of replacing it, so callers can
# still invoke spinner.update unconditionally.
7666 spinner.update = spinner.update_quiet
7669 portage.writemsg_stdout("Calculating dependencies  ")
7671 def _spinner_stop(spinner):
"""Finish the spinner output that _spinner_start() began."""
7672 if spinner is None or \
7673 spinner.update == spinner.update_quiet:
7676 if spinner.update != spinner.update_basic:
7677 # update_basic is used for non-tty output,
7678 # so don't output backspaces in that case.
7679 portage.writemsg_stdout("\b\b")
7681 portage.writemsg_stdout("... done!\n")
7683 def backtrack_depgraph(settings, trees, myopts, myparams,
7684 myaction, myfiles, spinner):
"""
7686 Raises PackageSetNotFound if myfiles contains a missing package set.
"""
7688 _spinner_start(spinner, myopts)
7690 return _backtrack_depgraph(settings, trees, myopts, myparams,
7691 myaction, myfiles, spinner)
# NOTE(review): _spinner_stop appearing after the return implies a
# try/finally pairing around the call — confirm.
7693 _spinner_stop(spinner)
7696 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
7698 debug = "--debug" in myopts
7700 max_retries = myopts.get('--backtrack', 10)
7701 max_depth = max(1, (max_retries + 1) / 2)
7702 allow_backtracking = max_retries > 0
7703 backtracker = Backtracker(max_depth)
7706 frozen_config = _frozen_depgraph_config(settings, trees,
7711 if debug and mydepgraph is not None:
7713 "\n\nbacktracking try %s \n\n" % \
7714 backtracked, noiselevel=-1, level=logging.DEBUG)
7715 mydepgraph.display_problems()
7717 backtrack_parameters = backtracker.get()
7719 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7720 frozen_config=frozen_config,
7721 allow_backtracking=allow_backtracking,
7722 backtrack_parameters=backtrack_parameters)
7723 success, favorites = mydepgraph.select_files(myfiles)
7725 if success or mydepgraph.success_without_autounmask():
7727 elif not allow_backtracking:
7729 elif backtracked >= max_retries:
7731 elif mydepgraph.need_restart():
7733 backtracker.feedback(mydepgraph.get_backtrack_infos())
7737 if not (success or mydepgraph.success_without_autounmask()) and backtracked:
7741 "\n\nbacktracking aborted after %s tries\n\n" % \
7742 backtracked, noiselevel=-1, level=logging.DEBUG)
7743 mydepgraph.display_problems()
7745 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7746 frozen_config=frozen_config,
7747 allow_backtracking=False,
7748 backtrack_parameters=backtracker.get_best_run())
7749 success, favorites = mydepgraph.select_files(myfiles)
7751 if not success and mydepgraph.autounmask_breakage_detected():
7754 "\n\nautounmask breakage detected\n\n",
7755 noiselevel=-1, level=logging.DEBUG)
7756 mydepgraph.display_problems()
7757 myopts["--autounmask"] = "n"
7758 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7759 frozen_config=frozen_config, allow_backtracking=False)
7760 success, favorites = mydepgraph.select_files(myfiles)
7762 return (success, mydepgraph, favorites)
7765 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
"""
7767 Raises PackageSetNotFound if myfiles contains a missing package set.
"""
7769 _spinner_start(spinner, myopts)
7771 return _resume_depgraph(settings, trees, mtimedb, myopts,
# NOTE(review): _spinner_stop appearing after the return implies a
# try/finally pairing around the call — confirm.
7774 _spinner_stop(spinner)
7776 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
"""
7778 Construct a depgraph for the given resume list. This will raise
7779 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
7780 TODO: Return reasons for dropped_tasks, for display/logging.
7782 @return: (success, depgraph, dropped_tasks)
"""
7785 skip_unsatisfied = True
7786 mergelist = mtimedb["resume"]["mergelist"]
7788 frozen_config = _frozen_depgraph_config(settings, trees,
7791 mydepgraph = depgraph(settings, trees,
7792 myopts, myparams, spinner, frozen_config=frozen_config)
7794 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
7795 skip_masked=skip_masked)
7796 except depgraph.UnsatisfiedResumeDep as e:
7797 if not skip_unsatisfied:
# Walk up from each unsatisfied dependency, collecting every
# merge/nomerge ancestor whose removal is implied; these are
# pruned from the mergelist before retrying.
7800 graph = mydepgraph._dynamic_config.digraph
7801 unsatisfied_parents = {}
7802 traversed_nodes = set()
7803 unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
7804 while unsatisfied_stack:
7805 pkg, atom = unsatisfied_stack.pop()
7806 if atom is not None and \
7807 mydepgraph._select_pkg_from_installed(
7808 pkg.root, atom)[0] is not None:
7810 atoms = unsatisfied_parents.get(pkg)
7813 unsatisfied_parents[pkg] = atoms
7814 if atom is not None:
7816 if pkg in traversed_nodes:
7818 traversed_nodes.add(pkg)
7820 # If this package was pulled in by a parent
7821 # package scheduled for merge, removing this
7822 # package may cause the parent package's
7823 # dependency to become unsatisfied.
7824 for parent_node, atom in \
7825 mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
7826 if not isinstance(parent_node, Package) \
7827 or parent_node.operation not in ("merge", "nomerge"):
7829 # We need to traverse all priorities here, in order to
7830 # ensure that a package with an unsatisfied dependency
7831 # won't get pulled in, even indirectly via a soft
7833 unsatisfied_stack.append((parent_node, atom))
7835 unsatisfied_tuples = frozenset(tuple(parent_node)
7836 for parent_node in unsatisfied_parents
7837 if isinstance(parent_node, Package))
7838 pruned_mergelist = []
7840 if isinstance(x, list) and \
7841 tuple(x) not in unsatisfied_tuples:
7842 pruned_mergelist.append(x)
7844 # If the mergelist doesn't shrink then this loop is infinite.
7845 if len(pruned_mergelist) == len(mergelist):
7846 # This happens if a package can't be dropped because
7847 # it's already installed, but it has unsatisfied PDEPEND.
7849 mergelist[:] = pruned_mergelist
7851 # Exclude installed packages that have been removed from the graph due
7852 # to failure to build/install runtime dependencies after the dependent
7853 # package has already been installed.
7854 dropped_tasks.update((pkg, atoms) for pkg, atoms in \
7855 unsatisfied_parents.items() if pkg.operation != "nomerge")
# Release references from this pruning round before retrying.
7857 del e, graph, traversed_nodes, \
7858 unsatisfied_parents, unsatisfied_stack
7862 return (success, mydepgraph, dropped_tasks)
7864 def get_mask_info(root_config, cpv, pkgsettings,
7865 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
"""Return (metadata, mask_reasons) for cpv, tolerating corrupt entries."""
7867 metadata = dict(zip(db_keys,
7868 db.aux_get(cpv, db_keys, myrepo=myrepo)))
# metadata is None when aux_get failed (corrupt or missing entry).
7872 if metadata is None:
7873 mreasons = ["corruption"]
7875 eapi = metadata['EAPI']
7876 if not portage.eapi_is_supported(eapi):
7877 mreasons = ['EAPI %s' % eapi]
7879 pkg = Package(type_name=pkg_type, root_config=root_config,
7880 cpv=cpv, built=built, installed=installed, metadata=metadata)
# Optionally evaluate masking with the caller-supplied USE resolver.
7883 if _pkg_use_enabled is not None:
7884 modified_use = _pkg_use_enabled(pkg)
7886 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
7888 return metadata, mreasons
7890 def show_masked_packages(masked_packages):
"""
Write a report line for each masked package, including its mask
reasons, any package.mask comment, and the location of missing
license files. Returns True if any package was masked because of
an unsupported EAPI.
"""
7891 shown_licenses = set()
7892 shown_comments = set()
7893 # Maybe there is both an ebuild and a binary. Only
7894 # show one of them to avoid redundant appearance.
7896 have_eapi_mask = False
7897 for (root_config, pkgsettings, cpv, repo,
7898 metadata, mreasons) in masked_packages:
7901 output_cpv += _repo_separator + repo
7902 if output_cpv in shown_cpvs:
7904 shown_cpvs.add(output_cpv)
7905 eapi_masked = metadata is not None and \
7906 not portage.eapi_is_supported(metadata["EAPI"])
7908 have_eapi_mask = True
7909 # When masked by EAPI, metadata is mostly useless since
7910 # it doesn't contain essential things like SLOT.
7912 comment, filename = None, None
# For package.mask entries, fetch the mask comment and its file.
7913 if not eapi_masked and \
7914 "package.mask" in mreasons:
7915 comment, filename = \
7916 portage.getmaskingreason(
7917 cpv, metadata=metadata,
7918 settings=pkgsettings,
7919 portdb=root_config.trees["porttree"].dbapi,
7920 return_location=True)
7921 missing_licenses = []
7922 if not eapi_masked and metadata is not None:
7924 missing_licenses = \
7925 pkgsettings._getMissingLicenses(
7927 except portage.exception.InvalidDependString:
7928 # This will have already been reported
7929 # above via mreasons.
7932 writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
# Only show each distinct mask comment and license hint once.
7935 if comment and comment not in shown_comments:
7936 writemsg(filename + ":\n" + comment + "\n",
7938 shown_comments.add(comment)
7939 portdb = root_config.trees["porttree"].dbapi
7940 for l in missing_licenses:
7941 if l in shown_licenses:
7943 l_path = portdb.findLicensePath(l)
7946 msg = ("A copy of the '%s' license" + \
7947 " is located at '%s'.\n\n") % (l, l_path)
7948 writemsg(msg, noiselevel=-1)
7949 shown_licenses.add(l)
7950 return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation covering masked packages."""
	lines = (
		"For more information, see the MASKED PACKAGES "
		"section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for line in lines:
		writemsg(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a pointer to the handbook section about blocked packages."""
	header = ("\nFor more information about " + bad("Blocked Packages") +
		", please refer to the following\n")
	writemsg(header, noiselevel=-1)
	writemsg(
		"section of the Gentoo Linux x86 Handbook "
		"(architecture is irrelevant):\n\n",
		noiselevel=-1)
	writemsg(
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml"
		"?full=1#blocked\n\n",
		noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg."""
	reasons = _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use)
	return [reason.message for reason in reasons]
7966 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7967 mreasons = _getmaskingstatus(
7968 pkg, settings=pkgsettings,
7969 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7971 if not pkg.installed:
7972 if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
7973 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7974 pkg._metadata["CHOST"]))
7977 for msgs in pkg.invalid.values():
7980 _MaskReason("invalid", "invalid: %s" % (msg,)))
7982 if not pkg._metadata["SLOT"]:
7984 _MaskReason("invalid", "SLOT: undefined"))