1 # Copyright 1999-2013 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function, unicode_literals
12 from collections import deque
13 from itertools import chain
16 from portage import os, OrderedDict
17 from portage import _unicode_decode, _unicode_encode, _encodings
18 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
19 from portage.dbapi import dbapi
20 from portage.dbapi.dep_expand import dep_expand
21 from portage.dbapi._similar_name_search import similar_name_search
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.dep._slot_operator import ignore_built_slot_operator_deps
26 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
28 from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
29 PackageNotFound, PortageException)
30 from portage.output import colorize, create_color_func, \
32 bad = create_color_func("BAD")
33 from portage.package.ebuild.config import _get_feature_flags
34 from portage.package.ebuild.getmaskingstatus import \
35 _getmaskingstatus, _MaskReason
36 from portage._sets import SETPREFIX
37 from portage._sets.base import InternalPackageSet
38 from portage.util import ConfigProtect, shlex_split, new_protect_filename
39 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
40 from portage.util import ensure_dirs
41 from portage.util import writemsg_level, write_atomic
42 from portage.util.digraph import digraph
43 from portage.util.listdir import _ignorecvs_dirs
44 from portage.util._async.TaskScheduler import TaskScheduler
45 from portage.versions import catpkgsplit
47 from _emerge.AtomArg import AtomArg
48 from _emerge.Blocker import Blocker
49 from _emerge.BlockerCache import BlockerCache
50 from _emerge.BlockerDepPriority import BlockerDepPriority
51 from _emerge.countdown import countdown
52 from _emerge.create_world_atom import create_world_atom
53 from _emerge.Dependency import Dependency
54 from _emerge.DependencyArg import DependencyArg
55 from _emerge.DepPriority import DepPriority
56 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
57 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
58 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
59 from _emerge.FakeVartree import FakeVartree
60 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
61 from _emerge.is_valid_package_atom import insert_category_into_atom, \
63 from _emerge.Package import Package
64 from _emerge.PackageArg import PackageArg
65 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
66 from _emerge.RootConfig import RootConfig
67 from _emerge.search import search
68 from _emerge.SetArg import SetArg
69 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
70 from _emerge.UnmergeDepPriority import UnmergeDepPriority
71 from _emerge.UseFlagDisplay import pkg_use_display
72 from _emerge.userquery import userquery
74 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
75 from _emerge.resolver.slot_collision import slot_conflict_handler
76 from _emerge.resolver.circular_dependency import circular_dependency_handler
77 from _emerge.resolver.output import Display
79 if sys.hexversion >= 0x3000000:
class _scheduler_graph_config(object):
	"""Plain container handed off to the Scheduler, bundling the trees,
	package cache, dependency graph and serialized merge list produced
	by the depgraph calculation."""
	def __init__(self, trees, pkg_cache, graph, mergelist):
		self.pkg_cache = pkg_cache
		self.mergelist = mergelist
def _wildcard_set(atoms):
	# Build an InternalPackageSet from user-supplied atom strings (e.g.
	# from --exclude), allowing */* style wildcards. An atom that does
	# not parse as given is retried with a "*/" category prefix, so bare
	# package names like "gcc" are accepted.
	pkgs = InternalPackageSet(allow_wildcard=True)
			x = Atom(x, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
class _frozen_depgraph_config(object):
	"""Resolver state that stays constant across backtracking runs:
	settings, per-root trees (with each vartree wrapped in a FakeVartree),
	cloned pkgsettings, and atom sets pre-parsed from command-line
	options."""
	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self._trees_orig = trees
		# All Package instances
		self._highest_license_masked = {}
		# --dynamic-deps defaults to enabled unless explicitly set to "n".
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# The FakeVartree models the vdb state plus pending merges.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
		self._required_set_names = set(["world"])
		# Parse each space-delimited option value into a wildcard-capable
		# package set (see _wildcard_set above).
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)
		# Presence (not value) of these options enables rebuild tracking.
		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	"""Per-root tracking of the package sets and argument atoms that
	have been pulled into the dependency graph."""
		# contains all sets added to the graph
		# contains non-set atoms given as arguments
		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks which packages must be rebuilt or reinstalled in response
	to --rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt,
	using a graph of build-time dependency edges between built packages."""
	def __init__(self, frozen_config, backtrack_parameters):
		# Graph of dep_pkg -> parent edges for built parents with
		# build-time deps; populated by add().
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copies, so backtracking parameters are not mutated in place.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True if any rebuild-triggering option is active.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)
	def add(self, dep_pkg, dep):
		# Record a build-time dependency edge (dep_pkg -> parent) when
		# rebuild tracking is active, the collapsed parent is a built
		# Package, and neither end matches --rebuild-exclude /
		# --rebuild-ignore.
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)
	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
	def _trigger_rebuild(self, parent, build_deps):
		# Decide whether 'parent' must be rebuilt from source or
		# reinstalled from a binary because one of its build-time deps
		# in build_deps (a slot_atom -> dep_pkg mapping) is changing.
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
		trees = self._frozen_config.trees
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):
				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				# 1) If parent is present on the binhost, but one of its
				# rebuild dependencies is not, then the parent should
				# be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				# because all of its rebuild dependencies are
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Accept a same-version (revision ignored) binary
					# of dep_pkg from the binhost as equivalent.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					# built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					bin_build_time, = bindb.aux_get(parent.cpv,
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						# is not up to date. Force reinstall.
						self.reinstall_list.add(root_slot)
	def trigger_rebuilds(self):
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		leaf_nodes = deque(graph.leaf_nodes())
		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])
			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
			slot_atom = node.slot_atom
			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				# Ignore a direct cycle.
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)
			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
class _dynamic_depgraph_config(object):
	"""Resolver state that changes during a calculation and is rebuilt
	for every backtracking run: the dependency graph itself, per-root
	fake vdbs and filtered dbapi views, blocker/conflict bookkeeping,
	and autounmask / backtracking state."""
	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This is used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		# This should be ordered such that the backtracker will
		# attempt to solve conflicts which occurred earlier first,
		# since an earlier conflict can be the cause of a conflict
		# which occurs later.
		self._slot_collision_info = OrderedDict()
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}
		# State carried over from the previous backtracking run.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}
		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			self.mydbapi[myroot] = fakedb
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# that have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid
			# an unresolvable direct circular dependency.
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			if "--usepkgonly" not in depgraph._frozen_config.myopts:
				portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in depgraph._frozen_config.myopts:
				bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
505 class depgraph(object):
507 pkg_tree_map = RootConfig.pkg_tree_map
	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
		# NOTE(review): BacktrackParameter() as a default argument is a
		# single instance shared by all calls (evaluated once at import
		# time); this is only safe if it is never mutated — verify.
		# A frozen_config may be passed in so that backtracking runs can
		# share the expensive immutable state.
		if frozen_config is None:
			frozen_config = _frozen_depgraph_config(settings, trees,
		self._frozen_config = frozen_config
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, backtrack_parameters)
		self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
		# Default selection strategies; presumably overridden elsewhere
		# for other modes — confirm against the full file.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.
		# Only do this work once per depgraph instance.
		if self._dynamic_config._vdb_loaded:
		for myroot in self._frozen_config.trees:
			# dynamic_deps defaults to enabled unless set to "n".
			dynamic_deps = self._dynamic_config.myparams.get(
				"dynamic_deps", "y") != "n"
			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts
			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.
			# FakeVartree.sync() populates virtuals, and we want
			# self.pkgsettings to have them populated too.
			self._frozen_config.pkgsettings[myroot] = \
				portage.config(clone=fake_vartree.settings)
			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi
				fakedb = self._dynamic_config._graph_trees[
					myroot]["vartree"].dbapi
					fakedb.cpv_inject(pkg)
				# Regenerate dynamic deps in parallel, bounded by the
				# --jobs and --load-average options.
				max_jobs = self._frozen_config.myopts.get("--jobs")
				max_load = self._frozen_config.myopts.get("--load-average")
				scheduler = TaskScheduler(
					self._dynamic_deps_preload(fake_vartree, fakedb),
					event_loop=fake_vartree._portdb._event_loop)
		self._dynamic_config._vdb_loaded = True
	def _dynamic_deps_preload(self, fake_vartree, fakedb):
		# Preload dynamic dependency metadata for every installed
		# package: inject each pkg into fakedb, then feed metadata into
		# the FakeVartree — from the ebuild metadata cache when valid,
		# otherwise via an EbuildMetadataPhase task (presumably yielded
		# to the TaskScheduler that drives this generator — confirm).
		portdb = fake_vartree._portdb
		for pkg in fake_vartree.dbapi:
			self._spinner_update()
			fakedb.cpv_inject(pkg)
			ebuild_path, repo_path = \
				portdb.findname2(pkg.cpv, myrepo=pkg.repo)
			if ebuild_path is None:
				# No ebuild available; fall back to vdb metadata.
				fake_vartree.dynamic_deps_preload(pkg, None)
			metadata, ebuild_hash = portdb._pull_valid_cache(
				pkg.cpv, ebuild_path, repo_path)
			if metadata is not None:
				fake_vartree.dynamic_deps_preload(pkg, metadata)
			# Cache miss: regenerate metadata asynchronously.
			proc = EbuildMetadataPhase(cpv=pkg.cpv,
				ebuild_hash=ebuild_hash,
				portdb=portdb, repo_path=repo_path,
				settings=portdb.doebuild_settings)
			proc.addExitListener(
				self._dynamic_deps_proc_exit(pkg, fake_vartree))
	class _dynamic_deps_proc_exit(object):
		"""Exit listener for an EbuildMetadataPhase: on success, feed the
		regenerated metadata back into the FakeVartree for the package."""
		__slots__ = ('_pkg', '_fake_vartree')
		def __init__(self, pkg, fake_vartree):
			self._fake_vartree = fake_vartree
		def __call__(self, proc):
			# Only use the metadata if the phase exited successfully;
			# assumes self._pkg was stored by the constructor.
			if proc.returncode == os.EX_OK:
				metadata = proc.metadata
				self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
609 def _spinner_update(self):
610 if self._frozen_config.spinner:
611 self._frozen_config.spinner.update()
	def _show_ignored_binaries(self):
		Show binaries that have been ignored because their USE didn't
		match the user's config.
		# Nothing to report when quiet, when nothing was ignored, or when
		# binpkg_respect_use was set explicitly (user already decided).
		if not self._dynamic_config.ignored_binaries \
			or '--quiet' in self._frozen_config.myopts \
			or self._dynamic_config.myparams.get(
			"binpkg_respect_use") in ("y", "n"):
		# Iterate over a copy since entries are popped below.
		for pkg in list(self._dynamic_config.ignored_binaries):
			selected_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)
			selected_pkg = selected_pkg[-1]
			# Irrelevant if a higher version was selected anyway.
			if selected_pkg > pkg:
				self._dynamic_config.ignored_binaries.pop(pkg)
			if selected_pkg.installed and \
				selected_pkg.cpv == pkg.cpv and \
				selected_pkg.build_time == pkg.build_time:
				# We don't care about ignored binaries when an
				# identical installed instance is selected to
				self._dynamic_config.ignored_binaries.pop(pkg)
		if not self._dynamic_config.ignored_binaries:
		self._show_merge_list()
		writemsg("\n!!! The following binary packages have been ignored " + \
			"due to non matching USE:\n\n", noiselevel=-1)
		for pkg, flags in self._dynamic_config.ignored_binaries.items():
			# Show each mismatched flag with the setting the user would
			# need (enabled flags that the binary lacks, and vice versa).
			for flag in sorted(flags):
				if flag not in pkg.use.enabled:
					flag_display.append(flag)
			flag_display = " ".join(flag_display)
			# The user can paste this line into package.use
			writemsg("    =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)
			"NOTE: The --binpkg-respect-use=n option will prevent emerge",
			"      from ignoring these binary packages if possible.",
			"      Using --binpkg-respect-use=y will silence this warning."
			line = colorize("INFORM", line)
			writemsg(line + "\n", noiselevel=-1)
	def _get_missed_updates(self):
		# Collect updates that were masked at runtime, keyed by
		# (root, slot_atom) so only one entry per slot is reported.
		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
				# Exclude installed here since we only
				# want to show available updates.
			chosen_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)
			# Not a missed update if the chosen package is >= this one.
			if not chosen_pkg or chosen_pkg[-1] >= pkg:
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
			for mask_type, parent_atoms in mask_reasons.items():
				missed_updates[k] = (pkg, mask_type, parent_atoms)
		return missed_updates
	def _show_missed_update(self):
		# Report updates skipped due to slot conflicts or missing
		# dependencies, grouped by the type of mask reason.
		missed_updates = self._get_missed_updates()
		if not missed_updates:
		missed_update_types = {}
		for pkg, mask_type, parent_atoms in missed_updates.values():
			missed_update_types.setdefault(mask_type,
				[]).append((pkg, parent_atoms))
		# In quiet (non-debug) mode, suppress the noisy categories.
		if '--quiet' in self._frozen_config.myopts and \
			'--debug' not in self._frozen_config.myopts:
			missed_update_types.pop("slot conflict", None)
			missed_update_types.pop("missing dependency", None)
		self._show_missed_update_slot_conflicts(
			missed_update_types.get("slot conflict"))
		self._show_missed_update_unsatisfied_dep(
			missed_update_types.get("missing dependency"))
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		# Display updates skipped because a dependency could not be
		# satisfied; updates masked by backtracking are collected and
		# shown in abbreviated form at the end.
		if not missed_updates:
		self._show_merge_list()
		backtrack_masked = []
		for pkg, parent_atoms in missed_updates:
			for parent, root, atom in parent_atoms:
					self._show_unsatisfied_dep(root, atom, myparent=parent,
						check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))
			writemsg("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n", noiselevel=-1)
			writemsg(str(pkg.slot_atom), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)
			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
				writemsg("\n", noiselevel=-1)
		# These are shown in abbreviated form, in order to avoid terminal
		# flooding from mask messages as reported in bug #285832.
		writemsg("\n!!! The following update(s) have been skipped " + \
			"due to unsatisfied dependencies\n" + \
			"!!! triggered by backtracking:\n\n", noiselevel=-1)
		for pkg, parent_atoms in backtrack_masked:
			writemsg(str(pkg.slot_atom), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)
	def _show_missed_update_slot_conflicts(self, missed_updates):
		# Display updates skipped because of slot conflicts, listing for
		# each skipped package the parent/atom pairs that conflicted.
		if not missed_updates:
		self._show_merge_list()
		msg.append("\nWARNING: One or more updates have been " + \
			"skipped due to a dependency conflict:\n\n")
		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			if pkg.root_config.settings["ROOT"] != "/":
				msg.append(" for %s" % (pkg.root,))
			for parent, atom in parent_atoms:
				msg.append(" conflicts with\n")
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))
		writemsg("".join(msg), noiselevel=-1)
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		if not self._dynamic_config._slot_collision_info:
		self._show_merge_list()
		# Delegate conflict formatting to the slot_conflict_handler.
		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
		handler = self._dynamic_config._slot_conflict_handler
		conflict = handler.get_conflict()
		writemsg(conflict, noiselevel=-1)
		explanation = handler.get_explanation()
		writemsg(explanation, noiselevel=-1)
		if "--quiet" in self._frozen_config.myopts:
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
		# Suggest raising --backtrack only when backtracking is off and
		# the configured value is below the suggested 30.
		if not self._dynamic_config._allow_backtracking and \
			(backtrack_opt is None or \
			(backtrack_opt > 0 and backtrack_opt < 30)):
			msg.append(" You may want to try a larger value of the ")
			msg.append("--backtrack option, such as --backtrack=30, ")
			msg.append("in order to see if that will solve this conflict ")
			msg.append("automatically.")
		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)
	def _process_slot_conflicts(self):
		If there are any slot conflicts and backtracking is enabled,
		_complete_graph should complete the graph before this method
		is called, so that all relevant reverse dependencies are
		available for use in backtracking decisions.
		# Dispatch each recorded slot collision to _process_slot_conflict.
		for (slot_atom, root), slot_nodes in \
			self._dynamic_config._slot_collision_info.items():
			self._process_slot_conflict(root, slot_atom, slot_nodes)
	def _process_slot_conflict(self, root, slot_atom, slot_nodes):
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		debug = "--debug" in self._frozen_config.myopts
		# Union of all parent atoms across the conflicting packages.
		slot_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			slot_parent_atoms.update(parent_atoms)
		for pkg in slot_nodes:
			# A package that is already runtime-masked here indicates a
			# backtracking loop (masking it did not resolve the conflict).
			if self._dynamic_config._allow_backtracking and \
				pkg in self._dynamic_config._runtime_pkg_mask:
					"!!! backtracking loop detected: %s %s\n" % \
					self._dynamic_config._runtime_pkg_mask[pkg]),
					level=logging.DEBUG, noiselevel=-1)
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if parent_atoms is None:
				self._dynamic_config._parent_atoms[pkg] = parent_atoms
			# Classify each parent atom: does it match this package too,
			# or does it only match a subset of the slot's packages?
			for parent_atom in slot_parent_atoms:
				if parent_atom in parent_atoms:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,), allow_repo=True)
				if atom_set.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):
					parent_atoms.add(parent_atom)
					conflict_atoms.setdefault(parent_atom, set()).add(pkg)
				conflict_pkgs.append(pkg)
		if conflict_pkgs and \
			self._dynamic_config._allow_backtracking and \
			not self._accept_blocker_conflicts():
			for pkg in conflict_pkgs:
				# Prefer a slot-operator (ABI) backtracking solution when
				# available; otherwise leave the package for the generic
				# slot-conflict backtracking below.
				if self._slot_conflict_backtrack_abi(pkg,
					slot_nodes, conflict_atoms):
					backtrack_infos = self._dynamic_config._backtrack_infos
					config = backtrack_infos.setdefault("config", {})
					config.setdefault("slot_conflict_abi", set()).add(pkg)
					remaining.append(pkg)
			self._slot_confict_backtrack(root, slot_atom,
				slot_parent_atoms, remaining)
# NOTE(review): extraction dropped some source lines here; code kept
# byte-identical. Purpose (from visible code): choose which conflicting
# package to mask and queue that choice as backtracking info.
# NOTE(review): the method name's "confict" spelling matches the visible
# call site above — do not "correct" it without updating callers.
947 def _slot_confict_backtrack(self, root, slot_atom,
948 all_parents, conflict_pkgs):
950 debug = "--debug" in self._frozen_config.myopts
951 existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
953 # The ordering of backtrack_data can make
954 # a difference here, because both mask actions may lead
955 # to valid, but different, solutions and the one with
956 # 'existing_node' masked is usually the better one. Because
957 # of that, we choose an order such that
958 # the backtracker will first explore the choice with
959 # existing_node masked. The backtracker reverses the
960 # order, so the order it uses is the reverse of the
961 # order shown here. See bug #339606.
962 if existing_node in conflict_pkgs and \
963 existing_node is not conflict_pkgs[-1]:
964 conflict_pkgs.remove(existing_node)
965 conflict_pkgs.append(existing_node)
966 for to_be_masked in conflict_pkgs:
967 # For missed update messages, find out which
968 # atoms matched to_be_selected that did not
969 # match to_be_masked.
971 self._dynamic_config._parent_atoms.get(to_be_masked, set())
972 conflict_atoms = set(parent_atom for parent_atom in all_parents \
973 if parent_atom not in parent_atoms)
974 backtrack_data.append((to_be_masked, conflict_atoms))
976 if len(backtrack_data) > 1:
977 # NOTE: Generally, we prefer to mask the higher
978 # version since this solves common cases in which a
979 # lower version is needed so that all dependencies
980 # will be satisfied (bug #337178). However, if
981 # existing_node happens to be installed then we
982 # mask that since this is a common case that is
983 # triggered when --update is not enabled.
984 if existing_node.installed:
986 elif any(pkg > existing_node for pkg in conflict_pkgs):
987 backtrack_data.reverse()
989 to_be_masked = backtrack_data[-1][0]
991 self._dynamic_config._backtrack_infos.setdefault(
992 "slot conflict", []).append(backtrack_data)
993 self._dynamic_config._need_restart = True
998 msg.append("backtracking due to slot conflict:")
999 msg.append(" first package: %s" % existing_node)
1000 msg.append(" package to mask: %s" % to_be_masked)
1001 msg.append(" slot: %s" % slot_atom)
1002 msg.append(" parents: %s" % ", ".join( \
1003 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
1005 writemsg_level("".join("%s\n" % l for l in msg),
1006 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): extraction dropped some lines (continue/return paths);
# code kept byte-identical. Visible logic: for each conflict atom with a
# built parent and ":=" slot operator, probe whether rebuilding the parent
# against another slot node resolves the conflict, and backtrack if so.
1008 def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
1010 If one or more conflict atoms have a slot/sub-slot dep that can be resolved
1011 by rebuilding the parent package, then schedule the rebuild via
1012 backtracking, and return True. Otherwise, return False.
1015 found_update = False
1016 for parent_atom, conflict_pkgs in conflict_atoms.items():
1017 parent, atom = parent_atom
1018 if atom.slot_operator != "=" or not parent.built:
1021 if pkg not in conflict_pkgs:
1024 for other_pkg in slot_nodes:
1025 if other_pkg in conflict_pkgs:
1028 dep = Dependency(atom=atom, child=other_pkg,
1029 parent=parent, root=pkg.root)
1031 if self._slot_operator_update_probe(dep, slot_conflict=True):
1032 self._slot_operator_update_backtrack(dep)
# NOTE(review): the "return found_update" / "found_update = True" lines
# appear to be among the dropped lines — confirm against upstream.
# NOTE(review): extraction dropped some lines (try/return/continue paths);
# code kept byte-identical. Visible logic: look for an unbuilt ebuild of
# dep.child whose slot/sub-slot matches the built child, i.e. a sub-slot
# change without a revision bump (bug #456208).
1037 def _slot_change_probe(self, dep):
1040 @return: True if dep.child should be rebuilt due to a change
1041 in sub-slot (without revbump, as in bug #456208).
1043 if not (isinstance(dep.parent, Package) and \
1044 not dep.parent.built and dep.child.built):
1047 root_config = self._frozen_config.roots[dep.root]
1050 matches.append(self._pkg(dep.child.cpv, "ebuild",
1051 root_config, myrepo=dep.child.repo))
1052 except PackageNotFound:
1055 for unbuilt_child in chain(matches,
1056 self._iter_match_pkgs(root_config, "ebuild",
1057 Atom("=%s" % (dep.child.cpv,)))):
1058 if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
1060 if self._frozen_config.excluded_pkgs.findAtomForPackage(
1062 modified_use=self._pkg_use_enabled(unbuilt_child)):
1064 if not self._pkg_visibility_check(unbuilt_child):
1070 if unbuilt_child.slot == dep.child.slot and \
1071 unbuilt_child.sub_slot == dep.child.sub_slot:
1074 return unbuilt_child
# NOTE(review): extraction dropped some lines (e.g. the `child` binding and
# mask/reinstall container initializers); code kept byte-identical.
# Visible logic: record backtracking info (mask built packages, schedule
# replacement of installed ones) for a detected slot/sub-slot change.
1076 def _slot_change_backtrack(self, dep, new_child_slot):
1078 if "--debug" in self._frozen_config.myopts:
1082 msg.append("backtracking due to slot/sub-slot change:")
1083 msg.append(" child package: %s" % child)
1084 msg.append(" child slot: %s/%s" %
1085 (child.slot, child.sub_slot))
1086 msg.append(" new child: %s" % new_child_slot)
1087 msg.append(" new child slot: %s/%s" %
1088 (new_child_slot.slot, new_child_slot.sub_slot))
1089 msg.append(" parent package: %s" % dep.parent)
1090 msg.append(" atom: %s" % dep.atom)
1092 writemsg_level("\n".join(msg),
1093 noiselevel=-1, level=logging.DEBUG)
1094 backtrack_infos = self._dynamic_config._backtrack_infos
1095 config = backtrack_infos.setdefault("config", {})
1097 # mask unwanted binary packages if necessary
1099 if not child.installed:
1100 masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
1102 config.setdefault("slot_operator_mask_built", {}).update(masks)
1104 # trigger replacement of installed packages if necessary
1107 replacement_atom = self._replace_installed_atom(child)
1108 if replacement_atom is not None:
1109 reinstalls.add((child.root, replacement_atom))
1111 config.setdefault("slot_operator_replace_installed",
1112 set()).update(reinstalls)
1114 self._dynamic_config._need_restart = True
# NOTE(review): extraction dropped some lines (e.g. `child = dep.child`
# and container initializers); code kept byte-identical. Visible logic:
# record backtracking info for a missed slot-operator update — mask
# built packages and schedule reinstall of installed ones.
1116 def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
1117 if new_child_slot is None:
1120 child = new_child_slot
1121 if "--debug" in self._frozen_config.myopts:
1125 msg.append("backtracking due to missed slot abi update:")
1126 msg.append(" child package: %s" % child)
1127 if new_child_slot is not None:
1128 msg.append(" new child slot package: %s" % new_child_slot)
1129 msg.append(" parent package: %s" % dep.parent)
1130 msg.append(" atom: %s" % dep.atom)
1132 writemsg_level("\n".join(msg),
1133 noiselevel=-1, level=logging.DEBUG)
1134 backtrack_infos = self._dynamic_config._backtrack_infos
1135 config = backtrack_infos.setdefault("config", {})
1137 # mask unwanted binary packages if necessary
1139 if new_child_slot is None:
1140 if not child.installed:
1141 abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
1142 if not dep.parent.installed:
1143 abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
1145 config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
1147 # trigger replacement of installed packages if necessary
1148 abi_reinstalls = set()
1149 if dep.parent.installed:
1150 replacement_atom = self._replace_installed_atom(dep.parent)
1151 if replacement_atom is not None:
1152 abi_reinstalls.add((dep.parent.root, replacement_atom))
1153 if new_child_slot is None and child.installed:
1154 replacement_atom = self._replace_installed_atom(child)
1155 if replacement_atom is not None:
1156 abi_reinstalls.add((child.root, replacement_atom))
1158 config.setdefault("slot_operator_replace_installed",
1159 set()).update(abi_reinstalls)
1161 self._dynamic_config._need_restart = True
# NOTE(review): this long method lost many interior lines to extraction
# (continue/return paths, comparisons, debug guards); code kept
# byte-identical. Visible logic: search for a replacement parent whose
# ":=" dep would match a newer slot/sub-slot of dep.child, so a rebuild
# or reinstall can be scheduled via backtracking.
1163 def _slot_operator_update_probe(self, dep, new_child_slot=False,
1164 slot_conflict=False):
1166 slot/sub-slot := operators tend to prevent updates from getting pulled in,
1167 since installed packages pull in packages with the slot/sub-slot that they
1168 were built against. Detect this case so that we can schedule rebuilds
1169 and reinstalls when appropriate.
1170 NOTE: This function only searches for updates that involve upgrades
1171 to higher versions, since the logic required to detect when a
1172 downgrade would be desirable is not implemented.
1175 if dep.child.installed and \
1176 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
1177 modified_use=self._pkg_use_enabled(dep.child)):
1180 if dep.parent.installed and \
1181 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1182 modified_use=self._pkg_use_enabled(dep.parent)):
1185 debug = "--debug" in self._frozen_config.myopts
1186 selective = "selective" in self._dynamic_config.myparams
1187 want_downgrade = None
1189 for replacement_parent in self._iter_similar_available(dep.parent,
1190 dep.parent.slot_atom):
1192 for atom in replacement_parent.validated_atoms:
1193 if not atom.slot_operator == "=" or \
1195 atom.cp != dep.atom.cp:
1198 # Discard USE deps, we're only searching for an approximate
1199 # pattern, and dealing with USE states is too complex for
1201 atom = atom.without_use
1203 if replacement_parent.built and \
1204 portage.dep._match_slot(atom, dep.child):
1205 # Our selected replacement_parent appears to be built
1206 # for the existing child selection. So, discard this
1207 # parent and search for another.
1210 for pkg in self._iter_similar_available(
1212 if pkg.slot == dep.child.slot and \
1213 pkg.sub_slot == dep.child.sub_slot:
1214 # If slot/sub-slot is identical, then there's
1215 # no point in updating.
1218 if pkg.slot == dep.child.slot:
1221 # the new slot only matters if the
1222 # package version is higher
1225 if pkg.slot != dep.child.slot:
1228 if want_downgrade is None:
1229 want_downgrade = self._downgrade_probe(dep.child)
1230 # be careful not to trigger a rebuild when
1231 # the only version available with a
1232 # different slot_operator is an older version
1233 if not want_downgrade:
1236 insignificant = False
1237 if not slot_conflict and \
1239 dep.parent.installed and \
1240 dep.child.installed and \
1241 dep.parent.cpv == replacement_parent.cpv and \
1242 dep.child.cpv == pkg.cpv:
1243 # Then can happen if the child's sub-slot changed
1244 # without a revision bump. The sub-slot change is
1245 # considered insignificant until one of its parent
1246 # packages needs to be rebuilt (which may trigger a
1248 insignificant = True
1254 msg.append("slot_operator_update_probe:")
1255 msg.append(" existing child package: %s" % dep.child)
1256 msg.append(" existing parent package: %s" % dep.parent)
1257 msg.append(" new child package: %s" % pkg)
1258 msg.append(" new parent package: %s" % replacement_parent)
1260 msg.append("insignificant changes detected")
1262 writemsg_level("\n".join(msg),
1263 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): the success/failure return statements were dropped by the
# extraction; the trailing debug block below reports the "no update
# found" case.
1274 msg.append("slot_operator_update_probe:")
1275 msg.append(" existing child package: %s" % dep.child)
1276 msg.append(" existing parent package: %s" % dep.parent)
1277 msg.append(" new child package: %s" % None)
1278 msg.append(" new parent package: %s" % None)
1280 writemsg_level("\n".join(msg),
1281 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): extraction dropped some lines (continue/return paths and
# debug guards); code kept byte-identical. Visible logic: when a built
# slot-operator dep is unsatisfied, probe whether a similar available
# parent has a satisfiable ":=" dep on the same package.
1285 def _slot_operator_unsatisfied_probe(self, dep):
1287 if dep.parent.installed and \
1288 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1289 modified_use=self._pkg_use_enabled(dep.parent)):
1292 debug = "--debug" in self._frozen_config.myopts
1294 for replacement_parent in self._iter_similar_available(dep.parent,
1295 dep.parent.slot_atom):
1297 for atom in replacement_parent.validated_atoms:
1298 if not atom.slot_operator == "=" or \
1300 atom.cp != dep.atom.cp:
1303 # Discard USE deps, we're only searching for an approximate
1304 # pattern, and dealing with USE states is too complex for
1306 atom = atom.without_use
1308 pkg, existing_node = self._select_package(dep.root, atom,
1309 onlydeps=dep.onlydeps)
1317 msg.append("slot_operator_unsatisfied_probe:")
1318 msg.append(" existing parent package: %s" % dep.parent)
1319 msg.append(" existing parent atom: %s" % dep.atom)
1320 msg.append(" new parent package: %s" % replacement_parent)
1321 msg.append(" new child package: %s" % pkg)
1323 writemsg_level("\n".join(msg),
1324 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): success/failure returns were dropped by the extraction;
# the trailing debug block below reports the "nothing found" case.
1332 msg.append("slot_operator_unsatisfied_probe:")
1333 msg.append(" existing parent package: %s" % dep.parent)
1334 msg.append(" existing parent atom: %s" % dep.atom)
1335 msg.append(" new parent package: %s" % None)
1336 msg.append(" new child package: %s" % None)
1338 writemsg_level("\n".join(msg),
1339 noiselevel=-1, level=logging.DEBUG)
# NOTE(review): extraction dropped some lines (e.g. `parent = dep.parent`
# and container initializers); code kept byte-identical. Visible logic:
# record backtracking info for an unsatisfied built slot-operator dep —
# mask the built parent and/or schedule reinstall of the installed one.
1343 def _slot_operator_unsatisfied_backtrack(self, dep):
1347 if "--debug" in self._frozen_config.myopts:
1351 msg.append("backtracking due to unsatisfied "
1352 "built slot-operator dep:")
1353 msg.append(" parent package: %s" % parent)
1354 msg.append(" atom: %s" % dep.atom)
1356 writemsg_level("\n".join(msg),
1357 noiselevel=-1, level=logging.DEBUG)
1359 backtrack_infos = self._dynamic_config._backtrack_infos
1360 config = backtrack_infos.setdefault("config", {})
1362 # mask unwanted binary packages if necessary
1364 if not parent.installed:
1365 masks.setdefault(parent, {})["slot_operator_mask_built"] = None
1367 config.setdefault("slot_operator_mask_built", {}).update(masks)
1369 # trigger replacement of installed packages if necessary
1371 if parent.installed:
1372 replacement_atom = self._replace_installed_atom(parent)
1373 if replacement_atom is not None:
1374 reinstalls.add((parent.root, replacement_atom))
1376 config.setdefault("slot_operator_replace_installed",
1377 set()).update(reinstalls)
1379 self._dynamic_config._need_restart = True
1381 def _downgrade_probe(self, pkg):
1383 Detect cases where a downgrade of the given package is considered
1384 desirable due to the current version being masked or unavailable.
1386 available_pkg = None
1387 for available_pkg in self._iter_similar_available(pkg,
1389 if available_pkg >= pkg:
1390 # There's an available package of the same or higher
1391 # version, so downgrade seems undesirable.
1394 return available_pkg is not None
# NOTE(review): extraction dropped the `continue`/`yield` lines between
# the filters; code kept byte-identical. Visible logic: generator that
# filters candidate packages (runtime-masked, excluded, invisible,
# equivalent-binary-installed, ebuild-visibility) for a graph package.
1396 def _iter_similar_available(self, graph_pkg, atom):
1398 Given a package that's in the graph, do a rough check to
1399 see if a similar package is available to install. The given
1400 graph_pkg itself may be yielded only if it's not installed.
1403 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
1404 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
1405 use_ebuild_visibility = self._frozen_config.myopts.get(
1406 '--use-ebuild-visibility', 'n') != 'n'
1408 for pkg in self._iter_match_pkgs_any(
1409 graph_pkg.root_config, atom):
1410 if pkg.cp != graph_pkg.cp:
1411 # discard old-style virtual match
1415 if pkg in self._dynamic_config._runtime_pkg_mask:
1417 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
1418 modified_use=self._pkg_use_enabled(pkg)):
1420 if not self._pkg_visibility_check(pkg):
1423 if self._equiv_binary_installed(pkg):
1425 if not (not use_ebuild_visibility and
1426 (usepkgonly or useoldpkg_atoms.findAtomForPackage(
1427 pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
1428 not self._equiv_ebuild_visible(pkg):
def _replace_installed_atom(self, inst_pkg):
	"""
	Given an installed package, generate an atom suitable for
	slot_operator_replace_installed backtracking info. The replacement
	SLOT may differ from the installed SLOT, so first search by cpv.
	"""
	built_pkgs = []
	# Search by exact cpv first, then fall back to the slot atom.
	for search_atom in (Atom("=%s" % inst_pkg.cpv), inst_pkg.slot_atom):
		for pkg in self._iter_similar_available(inst_pkg, search_atom):
			if not pkg.built:
				return pkg.slot_atom
			if not pkg.installed:
				# avoid using SLOT from a built instance
				built_pkgs.append(pkg)

	if built_pkgs:
		# No ebuild candidate: fall back to the highest built one.
		best_version = max(built_pkgs)
		return best_version.slot_atom

	return None
# NOTE(review): extraction dropped some lines (continue paths, the
# `atom = dep.atom` binding, and a continuation of the inner
# _iter_similar_available call); code kept byte-identical. Visible
# logic: scan recorded slot-operator deps and schedule slot-change or
# slot-update backtracking where a newer slot is usable.
1463 def _slot_operator_trigger_reinstalls(self):
1465 Search for packages with slot-operator deps on older slots, and schedule
1466 rebuilds if they can link to a newer slot that's in the graph.
1469 rebuild_if_new_slot = self._dynamic_config.myparams.get(
1470 "rebuild_if_new_slot", "y") == "y"
1472 for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
1474 for dep in slot_info:
1477 if atom.slot_operator is None:
1480 if not atom.slot_operator_built:
1481 new_child_slot = self._slot_change_probe(dep)
1482 if new_child_slot is not None:
1483 self._slot_change_backtrack(dep, new_child_slot)
1486 if not (dep.parent and
1487 isinstance(dep.parent, Package) and dep.parent.built):
1490 # Check for slot update first, since we don't want to
1491 # trigger reinstall of the child package when a newer
1492 # slot will be used instead.
1493 if rebuild_if_new_slot:
1494 new_child = self._slot_operator_update_probe(dep,
1495 new_child_slot=True)
1497 self._slot_operator_update_backtrack(dep,
1498 new_child_slot=new_child)
1502 if self._slot_operator_update_probe(dep):
1503 self._slot_operator_update_backtrack(dep)
# NOTE(review): extraction dropped some lines (a continuation of the
# binpkg_respect_use expression and the `if flags: return flags` /
# final `return None` lines); code kept byte-identical.
1506 def _reinstall_for_flags(self, pkg, forced_flags,
1507 orig_use, orig_iuse, cur_use, cur_iuse):
1508 """Return a set of flags that trigger reinstallation, or None if there
1509 are no such flags."""
1511 # binpkg_respect_use: Behave like newuse by default. If newuse is
1512 # False and changed_use is True, then behave like changed_use.
1513 binpkg_respect_use = (pkg.built and
1514 self._dynamic_config.myparams.get("binpkg_respect_use")
1516 newuse = "--newuse" in self._frozen_config.myopts
1517 changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
1518 feature_flags = _get_feature_flags(
1519 _get_eapi_attrs(pkg.eapi))
1521 if newuse or (binpkg_respect_use and not changed_use):
1522 flags = set(orig_iuse.symmetric_difference(
1523 cur_iuse).difference(forced_flags))
1524 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
1525 cur_iuse.intersection(cur_use)))
1526 flags.difference_update(feature_flags)
1530 elif changed_use or binpkg_respect_use:
1531 flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
1532 cur_iuse.intersection(cur_use)))
1533 flags.difference_update(feature_flags)
1538 def _create_graph(self, allow_unsatisfied=False):
1539 dep_stack = self._dynamic_config._dep_stack
1540 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
1541 while dep_stack or dep_disjunctive_stack:
1542 self._spinner_update()
1544 dep = dep_stack.pop()
1545 if isinstance(dep, Package):
1546 if not self._add_pkg_deps(dep,
1547 allow_unsatisfied=allow_unsatisfied):
1550 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
1552 if dep_disjunctive_stack:
1553 if not self._pop_disjunction(allow_unsatisfied):
# NOTE(review): extraction dropped some lines (the `yield` statements,
# `arg_stack` initialization, and some continue/guard lines); code kept
# byte-identical.
1557 def _expand_set_args(self, input_args, add_to_digraph=False):
1559 Iterate over a list of DependencyArg instances and yield all
1560 instances given in the input together with additional SetArg
1561 instances that are generated from nested sets.
1562 @param input_args: An iterable of DependencyArg instances
1563 @type input_args: Iterable
1564 @param add_to_digraph: If True then add SetArg instances
1565 to the digraph, in order to record parent -> child
1566 relationships from nested sets
1567 @type add_to_digraph: Boolean
1569 @return: All args given in the input together with additional
1570 SetArg instances that are generated from nested sets
1573 traversed_set_args = set()
1575 for arg in input_args:
1576 if not isinstance(arg, SetArg):
1580 root_config = arg.root_config
1581 depgraph_sets = self._dynamic_config.sets[root_config.root]
1584 arg = arg_stack.pop()
1585 if arg in traversed_set_args:
1587 traversed_set_args.add(arg)
1590 self._dynamic_config.digraph.add(arg, None,
1591 priority=BlockerDepPriority.instance)
1595 # Traverse nested sets and add them to the stack
1596 # if they're not already in the graph. Also, graph
1597 # edges between parent and nested sets.
1598 for token in arg.pset.getNonAtoms():
1599 if not token.startswith(SETPREFIX):
1601 s = token[len(SETPREFIX):]
1602 nested_set = depgraph_sets.sets.get(s)
1603 if nested_set is None:
1604 nested_set = root_config.sets.get(s)
1605 if nested_set is not None:
1606 nested_arg = SetArg(arg=token, pset=nested_set,
1607 root_config=root_config)
1608 arg_stack.append(nested_arg)
1610 self._dynamic_config.digraph.add(nested_arg, arg,
1611 priority=BlockerDepPriority.instance)
1612 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# NOTE(review): extraction dropped many interior lines (return values,
# guards, debug wrappers); code kept byte-identical. Visible logic:
# resolve one Dependency — register blockers, select a package for the
# atom, record unsatisfied deps (possibly triggering backtracking), and
# finally hand the selected package to _add_pkg.
1614 def _add_dep(self, dep, allow_unsatisfied=False):
1615 debug = "--debug" in self._frozen_config.myopts
1616 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
1617 nodeps = "--nodeps" in self._frozen_config.myopts
1619 if not buildpkgonly and \
1621 not dep.collapsed_priority.ignored and \
1622 not dep.collapsed_priority.optional and \
1623 dep.parent not in self._dynamic_config._slot_collision_nodes:
1624 if dep.parent.onlydeps:
1625 # It's safe to ignore blockers if the
1626 # parent is an --onlydeps node.
1628 # The blocker applies to the root where
1629 # the parent is or will be installed.
1630 blocker = Blocker(atom=dep.atom,
1631 eapi=dep.parent.eapi,
1632 priority=dep.priority, root=dep.parent.root)
1633 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
1636 if dep.child is None:
1637 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
1638 onlydeps=dep.onlydeps)
1640 # The caller has selected a specific package
1641 # via self._minimize_packages().
1643 existing_node = self._dynamic_config._slot_pkg_map[
1644 dep.root].get(dep_pkg.slot_atom)
1647 if (dep.collapsed_priority.optional or
1648 dep.collapsed_priority.ignored):
1649 # This is an unnecessary build-time dep.
1651 if allow_unsatisfied:
1652 self._dynamic_config._unsatisfied_deps.append(dep)
1654 self._dynamic_config._unsatisfied_deps_for_display.append(
1655 ((dep.root, dep.atom), {"myparent":dep.parent}))
1657 # The parent node should not already be in
1658 # runtime_pkg_mask, since that would trigger an
1659 # infinite backtracking loop.
1660 if self._dynamic_config._allow_backtracking:
1661 if dep.parent in self._dynamic_config._runtime_pkg_mask:
1664 "!!! backtracking loop detected: %s %s\n" % \
1666 self._dynamic_config._runtime_pkg_mask[
1667 dep.parent]), noiselevel=-1)
1668 elif dep.atom.slot_operator_built and \
1669 self._slot_operator_unsatisfied_probe(dep):
1670 self._slot_operator_unsatisfied_backtrack(dep)
1672 elif not self.need_restart():
1673 # Do not backtrack if only USE have to be changed in
1674 # order to satisfy the dependency.
1675 dep_pkg, existing_node = \
1676 self._select_package(dep.root, dep.atom.without_use,
1677 onlydeps=dep.onlydeps)
1679 self._dynamic_config._backtrack_infos["missing dependency"] = dep
1680 self._dynamic_config._need_restart = True
1685 msg.append("backtracking due to unsatisfied dep:")
1686 msg.append(" parent: %s" % dep.parent)
1687 msg.append(" priority: %s" % dep.priority)
1688 msg.append(" root: %s" % dep.root)
1689 msg.append(" atom: %s" % dep.atom)
1691 writemsg_level("".join("%s\n" % l for l in msg),
1692 noiselevel=-1, level=logging.DEBUG)
1696 self._rebuild.add(dep_pkg, dep)
1698 ignore = dep.collapsed_priority.ignored and \
1699 not self._dynamic_config._traverse_ignored_deps
1700 if not ignore and not self._add_pkg(dep_pkg, dep):
1704 def _check_slot_conflict(self, pkg, atom):
1705 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
1708 matches = pkg.cpv == existing_node.cpv
1709 if pkg != existing_node and \
1711 # Use package set for matching since it will match via
1712 # PROVIDE when necessary, while match_from_list does not.
1713 matches = bool(InternalPackageSet(initial_atoms=(atom,),
1714 allow_repo=True).findAtomForPackage(existing_node,
1715 modified_use=self._pkg_use_enabled(existing_node)))
1717 return (existing_node, matches)
# NOTE(review): this very large method lost many interior lines to
# extraction (try/except bodies, return statements, debug-guard lines,
# depth bookkeeping); code kept byte-identical. Visible logic: add one
# package node to the depgraph — debug display, REQUIRED_USE check, slot
# conflict detection/registration, graph/digraph insertion, parent-atom
# bookkeeping, and queuing of the package for dependency traversal.
1719 def _add_pkg(self, pkg, dep):
1721 Adds a package to the depgraph, queues dependencies, and handles
1724 debug = "--debug" in self._frozen_config.myopts
1731 myparent = dep.parent
1732 priority = dep.priority
1734 if priority is None:
1735 priority = DepPriority()
1739 "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
1740 pkg_use_display(pkg, self._frozen_config.myopts,
1741 modified_use=self._pkg_use_enabled(pkg))),
1742 level=logging.DEBUG, noiselevel=-1)
1743 if isinstance(myparent,
1744 (PackageArg, AtomArg)):
1745 # For PackageArg and AtomArg types, it's
1746 # redundant to display the atom attribute.
1748 "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
1749 level=logging.DEBUG, noiselevel=-1)
1751 # Display the specific atom from SetArg or
1754 if dep.atom is not dep.atom.unevaluated_atom:
1755 uneval = " (%s)" % (dep.atom.unevaluated_atom,)
1757 "%s%s%s required by %s\n" %
1758 ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
1759 level=logging.DEBUG, noiselevel=-1)
1761 # Ensure that the dependencies of the same package
1762 # are never processed more than once.
1763 previously_added = pkg in self._dynamic_config.digraph
1765 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
1770 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1771 except portage.exception.InvalidDependString as e:
1772 if not pkg.installed:
1773 # should have been masked before it was selected
1777 # NOTE: REQUIRED_USE checks are delayed until after
1778 # package selection, since we want to prompt the user
1779 # for USE adjustment rather than have REQUIRED_USE
1780 # affect package selection and || dep choices.
1781 if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
1782 eapi_has_required_use(pkg.eapi):
1783 required_use_is_sat = check_required_use(
1784 pkg._metadata["REQUIRED_USE"],
1785 self._pkg_use_enabled(pkg),
1786 pkg.iuse.is_valid_flag,
1788 if not required_use_is_sat:
1789 if dep.atom is not None and dep.parent is not None:
1790 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1793 for parent_atom in arg_atoms:
1794 parent, atom = parent_atom
1795 self._add_parent_atom(pkg, parent_atom)
1799 atom = Atom("=" + pkg.cpv)
1800 self._dynamic_config._unsatisfied_deps_for_display.append(
1802 {"myparent" : dep.parent, "show_req_use" : pkg}))
1803 self._dynamic_config._skip_restart = True
1806 if not pkg.onlydeps:
1808 existing_node, existing_node_matches = \
1809 self._check_slot_conflict(pkg, dep.atom)
1810 slot_collision = False
1812 if existing_node_matches:
1813 # The existing node can be reused.
1814 if pkg != existing_node:
1816 previously_added = True
1818 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1819 except InvalidDependString as e:
1820 if not pkg.installed:
1821 # should have been masked before
1827 "%s%s %s\n" % ("Re-used Child:".ljust(15),
1828 pkg, pkg_use_display(pkg,
1829 self._frozen_config.myopts,
1830 modified_use=self._pkg_use_enabled(pkg))),
1831 level=logging.DEBUG, noiselevel=-1)
1834 self._add_slot_conflict(pkg)
1837 "%s%s %s\n" % ("Slot Conflict:".ljust(15),
1838 existing_node, pkg_use_display(existing_node,
1839 self._frozen_config.myopts,
1840 modified_use=self._pkg_use_enabled(existing_node))),
1841 level=logging.DEBUG, noiselevel=-1)
1843 slot_collision = True
1846 # Now add this node to the graph so that self.display()
1847 # can show use flags and --tree portage.output. This node is
1848 # only being partially added to the graph. It must not be
1849 # allowed to interfere with the other nodes that have been
1850 # added. Do not overwrite data for existing nodes in
1851 # self._dynamic_config.mydbapi since that data will be used for blocker
1853 # Even though the graph is now invalid, continue to process
1854 # dependencies so that things like --fetchonly can still
1855 # function despite collisions.
1857 elif not previously_added:
1858 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1859 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1860 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1861 self._dynamic_config._highest_pkg_cache.clear()
1862 self._check_masks(pkg)
1864 if not pkg.installed:
1865 # Allow this package to satisfy old-style virtuals in case it
1866 # doesn't already. Any pre-existing providers will be preferred
1869 pkgsettings.setinst(pkg.cpv, pkg._metadata)
1870 # For consistency, also update the global virtuals.
1871 settings = self._frozen_config.roots[pkg.root].settings
1873 settings.setinst(pkg.cpv, pkg._metadata)
1875 except portage.exception.InvalidDependString:
1876 if not pkg.installed:
1877 # should have been masked before it was selected
1881 self._dynamic_config._set_nodes.add(pkg)
1883 # Do this even for onlydeps, so that the
1884 # parent/child relationship is always known in case
1885 # self._show_slot_collision_notice() needs to be called later.
1886 # If a direct circular dependency is not an unsatisfied
1887 # buildtime dependency then drop it here since otherwise
1888 # it can skew the merge order calculation in an unwanted
1890 if pkg != dep.parent or \
1891 (priority.buildtime and not priority.satisfied):
1892 self._dynamic_config.digraph.add(pkg,
1893 dep.parent, priority=priority)
1894 if dep.atom is not None and dep.parent is not None:
1895 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1898 for parent_atom in arg_atoms:
1899 parent, atom = parent_atom
1900 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1901 self._add_parent_atom(pkg, parent_atom)
1903 # This section determines whether we go deeper into dependencies or not.
1904 # We want to go deeper on a few occasions:
1905 # Installing package A, we need to make sure package A's deps are met.
1906 # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1907 # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1908 if arg_atoms and depth > 0:
1909 for parent, atom in arg_atoms:
1910 if parent.reset_depth:
1914 if previously_added and pkg.depth is not None:
1915 depth = min(pkg.depth, depth)
1917 deep = self._dynamic_config.myparams.get("deep", 0)
1918 update = "--update" in self._frozen_config.myopts
1920 dep.want_update = (not self._dynamic_config._complete_mode and
1921 (arg_atoms or update) and
1922 not (deep is not True and depth > deep))
1925 if (not pkg.onlydeps and
1926 dep.atom and dep.atom.slot_operator is not None):
1927 self._add_slot_operator_dep(dep)
1929 recurse = deep is True or depth + 1 <= deep
1930 dep_stack = self._dynamic_config._dep_stack
1931 if "recurse" not in self._dynamic_config.myparams:
1933 elif pkg.installed and not recurse:
1934 dep_stack = self._dynamic_config._ignored_deps
1936 self._spinner_update()
1938 if not previously_added:
1939 dep_stack.append(pkg)
1942 def _check_masks(self, pkg):
1944 slot_key = (pkg.root, pkg.slot_atom)
1946 # Check for upgrades in the same slot that are
1947 # masked due to a LICENSE change in a newer
1948 # version that is not masked for any other reason.
1949 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1950 if other_pkg is not None and pkg < other_pkg:
1951 self._dynamic_config._masked_license_updates.add(other_pkg)
1953 def _add_parent_atom(self, pkg, parent_atom):
1954 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1955 if parent_atoms is None:
1956 parent_atoms = set()
1957 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1958 parent_atoms.add(parent_atom)
1960 def _add_slot_operator_dep(self, dep):
1961 slot_key = (dep.root, dep.child.slot_atom)
1962 slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
1963 if slot_info is None:
1965 self._dynamic_config._slot_operator_deps[slot_key] = slot_info
1966 slot_info.append(dep)
1968 def _add_slot_conflict(self, pkg):
1969 self._dynamic_config._slot_collision_nodes.add(pkg)
1970 slot_key = (pkg.slot_atom, pkg.root)
1971 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1972 if slot_nodes is None:
1974 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1975 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
# Collect pkg's dependency strings (DEPEND/RDEPEND/PDEPEND/HDEPEND), decide
# which classes apply (bdeps handling, --root-deps, removal actions), parse
# them with use_reduce, and feed each group to _add_pkg_dep_string.
# NOTE(review): this listing is elided/garbled — original line numbers are
# baked into each line, indentation is lost, and several statements are
# missing (e.g. the `edepend = {}` init, the `deps = (` tuple opener, and
# the try/except scaffolding). Do not treat as runnable; verify against
# upstream portage depgraph.py before editing.
1978 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1981 metadata = pkg._metadata
1982 removal_action = "remove" in self._dynamic_config.myparams
1983 eapi_attrs = _get_eapi_attrs(pkg.eapi)
# Copy every dependency class from metadata into the local edepend map.
1986 for k in Package._dep_keys:
1987 edepend[k] = metadata[k]
# --buildpkgonly without "deep": runtime deps are irrelevant for the build.
1989 if not pkg.built and \
1990 "--buildpkgonly" in self._frozen_config.myopts and \
1991 "deep" not in self._dynamic_config.myparams:
1992 edepend["RDEPEND"] = ""
1993 edepend["PDEPEND"] = ""
1995 ignore_build_time_deps = False
1996 if pkg.built and not removal_action:
1997 if self._dynamic_config.myparams.get("bdeps", "n") == "y":
1998 # Pull in build time deps as requested, but marked them as
1999 # "optional" since they are not strictly required. This allows
2000 # more freedom in the merge order calculation for solving
2001 # circular dependencies. Don't convert to PDEPEND since that
2002 # could make --with-bdeps=y less effective if it is used to
2003 # adjust merge order to prevent built_with_use() calls from
2007 ignore_build_time_deps = True
2009 if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
2010 # Removal actions never traverse ignored buildtime
2011 # dependencies, so it's safe to discard them early.
2012 edepend["DEPEND"] = ""
2013 edepend["HDEPEND"] = ""
2014 ignore_build_time_deps = True
2016 ignore_depend_deps = ignore_build_time_deps
2017 ignore_hdepend_deps = ignore_build_time_deps
# Choose which root DEPEND applies to; HDEPEND (EAPI 5-hdepend) targets the
# build host, so without it DEPEND may fall back to the running root.
2020 depend_root = myroot
2022 if eapi_attrs.hdepend:
2023 depend_root = myroot
2025 depend_root = self._frozen_config._running_root.root
2026 root_deps = self._frozen_config.myopts.get("--root-deps")
2027 if root_deps is not None:
2028 if root_deps is True:
2029 depend_root = myroot
2030 elif root_deps == "rdeps":
2031 ignore_depend_deps = True
2033 # If rebuild mode is not enabled, it's safe to discard ignored
2034 # build-time dependencies. If you want these deps to be traversed
2035 # in "complete" mode then you need to specify --with-bdeps=y.
2036 if not self._rebuild.rebuild:
2037 if ignore_depend_deps:
2038 edepend["DEPEND"] = ""
2039 if ignore_hdepend_deps:
2040 edepend["HDEPEND"] = ""
# (root, depstring, priority) triples — presumably bound to a `deps` tuple
# whose opening line is missing from this copy.
2043 (depend_root, edepend["DEPEND"],
2044 self._priority(buildtime=True,
2045 optional=(pkg.built or ignore_depend_deps),
2046 ignored=ignore_depend_deps)),
2047 (self._frozen_config._running_root.root, edepend["HDEPEND"],
2048 self._priority(buildtime=True,
2049 optional=(pkg.built or ignore_hdepend_deps),
2050 ignored=ignore_hdepend_deps)),
2051 (myroot, edepend["RDEPEND"],
2052 self._priority(runtime=True)),
2053 (myroot, edepend["PDEPEND"],
2054 self._priority(runtime_post=True))
2057 debug = "--debug" in self._frozen_config.myopts
2059 for dep_root, dep_string, dep_priority in deps:
2063 writemsg_level("\nParent: %s\n" % (pkg,),
2064 noiselevel=-1, level=logging.DEBUG)
2065 writemsg_level("Depstring: %s\n" % (dep_string,),
2066 noiselevel=-1, level=logging.DEBUG)
2067 writemsg_level("Priority: %s\n" % (dep_priority,),
2068 noiselevel=-1, level=logging.DEBUG)
# Parse the raw dep string into structured Atom lists under the enabled USE.
2071 dep_string = portage.dep.use_reduce(dep_string,
2072 uselist=self._pkg_use_enabled(pkg),
2073 is_valid_flag=pkg.iuse.is_valid_flag,
2074 opconvert=True, token_class=Atom,
2076 except portage.exception.InvalidDependString as e:
2077 if not pkg.installed:
2078 # should have been masked before it was selected
2082 # Try again, but omit the is_valid_flag argument, since
2083 # invalid USE conditionals are a common problem and it's
2084 # practical to ignore this issue for installed packages.
2086 dep_string = portage.dep.use_reduce(dep_string,
2087 uselist=self._pkg_use_enabled(pkg),
2088 opconvert=True, token_class=Atom,
2090 except portage.exception.InvalidDependString as e:
2091 self._dynamic_config._masked_installed.add(pkg)
# Defer disjunctive (|| / virtual) parts for later processing.
2096 dep_string = list(self._queue_disjunctive_deps(
2097 pkg, dep_root, dep_priority, dep_string))
2098 except portage.exception.InvalidDependString as e:
2100 self._dynamic_config._masked_installed.add(pkg)
2104 # should have been masked before it was selected
2110 if not self._add_pkg_dep_string(
2111 pkg, dep_root, dep_priority, dep_string,
# Mark the package so its deps are not traversed a second time.
2115 self._dynamic_config._traversed_pkg_deps.add(pkg)
# Thin wrapper around _wrapped_add_pkg_dep_string that temporarily disables
# autounmask while processing optional/ignored deps, restoring the previous
# setting afterwards (presumably inside a try/finally whose scaffolding is
# missing from this elided copy — TODO confirm against upstream).
# NOTE(review): the signature's continuation line (allow_unsatisfied) is also
# missing here; the text is not runnable as shown.
2118 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
2120 _autounmask_backup = self._dynamic_config._autounmask
2121 if dep_priority.optional or dep_priority.ignored:
2122 # Temporarily disable autounmask for deps that
2123 # don't necessarily need to be satisfied.
2124 self._dynamic_config._autounmask = False
2126 return self._wrapped_add_pkg_dep_string(
2127 pkg, dep_root, dep_priority, dep_string,
# Restore the saved autounmask state regardless of the outcome.
2130 self._dynamic_config._autounmask = _autounmask_backup
# Core per-package dependency traversal: selects atoms for pkg's parsed dep
# string, builds Dependency objects with appropriate priorities (marking
# those satisfied by installed packages), queues or ignores them according
# to --deep depth, and then walks indirect virtual deps the same way.
# NOTE(review): this listing is elided/garbled — original line numbers are
# baked in, indentation is lost, and many statements (try/except headers,
# `continue` lines, returns) are missing. Do not treat as runnable; verify
# against upstream portage depgraph.py before editing.
2132 def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
2133 dep_string, allow_unsatisfied):
2134 depth = pkg.depth + 1
2135 deep = self._dynamic_config.myparams.get("deep", 0)
# recurse_satisfied: whether deps already satisfied by installed packages
# should still be traversed at this depth (--deep).
2136 recurse_satisfied = deep is True or depth <= deep
2137 debug = "--debug" in self._frozen_config.myopts
2138 strict = pkg.type_name != "installed"
2141 writemsg_level("\nParent: %s\n" % (pkg,),
2142 noiselevel=-1, level=logging.DEBUG)
2143 dep_repr = portage.dep.paren_enclose(dep_string,
2144 unevaluated_atom=True, opconvert=True)
2145 writemsg_level("Depstring: %s\n" % (dep_repr,),
2146 noiselevel=-1, level=logging.DEBUG)
2147 writemsg_level("Priority: %s\n" % (dep_priority,),
2148 noiselevel=-1, level=logging.DEBUG)
# dep_check-driven atom selection; InvalidDependString is only tolerated
# for installed packages (which are then treated as masked).
2151 selected_atoms = self._select_atoms(dep_root,
2152 dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
2153 strict=strict, priority=dep_priority)
2154 except portage.exception.InvalidDependString:
2156 self._dynamic_config._masked_installed.add(pkg)
2159 # should have been masked before it was selected
2163 writemsg_level("Candidates: %s\n" % \
2164 ([str(x) for x in selected_atoms[pkg]],),
2165 noiselevel=-1, level=logging.DEBUG)
2167 root_config = self._frozen_config.roots[dep_root]
2168 vardb = root_config.trees["vartree"].dbapi
2169 traversed_virt_pkgs = set()
2171 reinstall_atoms = self._frozen_config.reinstall_atoms
# --- Direct deps of pkg ---
2172 for atom, child in self._minimize_children(
2173 pkg, dep_priority, root_config, selected_atoms[pkg]):
2175 # If this was a specially generated virtual atom
2176 # from dep_check, map it back to the original, in
2177 # order to avoid distortion in places like display
2178 # or conflict resolution code.
2179 is_virt = hasattr(atom, '_orig_atom')
2180 atom = getattr(atom, '_orig_atom', atom)
2182 if atom.blocker and \
2183 (dep_priority.optional or dep_priority.ignored):
2184 # For --with-bdeps, ignore build-time only blockers
2185 # that originate from built packages.
2188 mypriority = dep_priority.copy()
2189 if not atom.blocker:
# Installed packages matching the atom (highest version first), excluding
# any the user asked to reinstall.
2190 inst_pkgs = [inst_pkg for inst_pkg in
2191 reversed(vardb.match_pkgs(atom))
2192 if not reinstall_atoms.findAtomForPackage(inst_pkg,
2193 modified_use=self._pkg_use_enabled(inst_pkg))]
2195 for inst_pkg in inst_pkgs:
2196 if self._pkg_visibility_check(inst_pkg):
2198 mypriority.satisfied = inst_pkg
2200 if not mypriority.satisfied:
2201 # none visible, so use highest
2202 mypriority.satisfied = inst_pkgs[0]
2204 dep = Dependency(atom=atom,
2205 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
2206 priority=mypriority, root=dep_root)
2208 # In some cases, dep_check will return deps that shouldn't
2209 # be proccessed any further, so they are identified and
2210 # discarded here. Try to discard as few as possible since
2211 # discarded dependencies reduce the amount of information
2212 # available for optimization of merge order.
2214 if not atom.blocker and \
2215 not recurse_satisfied and \
2216 mypriority.satisfied and \
2217 mypriority.satisfied.visible and \
2218 dep.child is not None and \
2219 not dep.child.installed and \
2220 self._dynamic_config._slot_pkg_map[dep.child.root].get(
2221 dep.child.slot_atom) is None:
2224 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
2225 except InvalidDependString:
2226 if not dep.child.installed:
2230 # Existing child selection may not be valid unless
2231 # it's added to the graph immediately, since "complete"
2232 # mode may select a different child later.
2235 self._dynamic_config._ignored_deps.append(dep)
2238 if dep_priority.ignored and \
2239 not self._dynamic_config._traverse_ignored_deps:
2240 if is_virt and dep.child is not None:
2241 traversed_virt_pkgs.add(dep.child)
2243 self._dynamic_config._ignored_deps.append(dep)
2245 if not self._add_dep(dep,
2246 allow_unsatisfied=allow_unsatisfied):
2248 if is_virt and dep.child is not None:
2249 traversed_virt_pkgs.add(dep.child)
# pkg's own entry has been handled; what remains are virtual parents.
2251 selected_atoms.pop(pkg)
2253 # Add selected indirect virtual deps to the graph. This
2254 # takes advantage of circular dependency avoidance that's done
2255 # by dep_zapdeps. We preserve actual parent/child relationships
2256 # here in order to avoid distorting the dependency graph like
2257 # <=portage-2.1.6.x did.
2258 for virt_dep, atoms in selected_atoms.items():
2260 virt_pkg = virt_dep.child
2261 if virt_pkg not in traversed_virt_pkgs:
2265 writemsg_level("\nCandidates: %s: %s\n" % \
2266 (virt_pkg.cpv, [str(x) for x in atoms]),
2267 noiselevel=-1, level=logging.DEBUG)
2269 if not dep_priority.ignored or \
2270 self._dynamic_config._traverse_ignored_deps:
2272 inst_pkgs = [inst_pkg for inst_pkg in
2273 reversed(vardb.match_pkgs(virt_dep.atom))
2274 if not reinstall_atoms.findAtomForPackage(inst_pkg,
2275 modified_use=self._pkg_use_enabled(inst_pkg))]
2277 for inst_pkg in inst_pkgs:
2278 if self._pkg_visibility_check(inst_pkg):
2280 virt_dep.priority.satisfied = inst_pkg
2282 if not virt_dep.priority.satisfied:
2283 # none visible, so use highest
2284 virt_dep.priority.satisfied = inst_pkgs[0]
2286 if not self._add_pkg(virt_pkg, virt_dep):
# --- Deps of the virtual itself, traversed with runtime priority ---
2289 for atom, child in self._minimize_children(
2290 pkg, self._priority(runtime=True), root_config, atoms):
2292 # If this was a specially generated virtual atom
2293 # from dep_check, map it back to the original, in
2294 # order to avoid distortion in places like display
2295 # or conflict resolution code.
2296 is_virt = hasattr(atom, '_orig_atom')
2297 atom = getattr(atom, '_orig_atom', atom)
2299 # This is a GLEP 37 virtual, so its deps are all runtime.
2300 mypriority = self._priority(runtime=True)
2301 if not atom.blocker:
2302 inst_pkgs = [inst_pkg for inst_pkg in
2303 reversed(vardb.match_pkgs(atom))
2304 if not reinstall_atoms.findAtomForPackage(inst_pkg,
2305 modified_use=self._pkg_use_enabled(inst_pkg))]
2307 for inst_pkg in inst_pkgs:
2308 if self._pkg_visibility_check(inst_pkg):
2310 mypriority.satisfied = inst_pkg
2312 if not mypriority.satisfied:
2313 # none visible, so use highest
2314 mypriority.satisfied = inst_pkgs[0]
2316 # Dependencies of virtuals are considered to have the
2317 # same depth as the virtual itself.
2318 dep = Dependency(atom=atom,
2319 blocker=atom.blocker, child=child, depth=virt_dep.depth,
2320 parent=virt_pkg, priority=mypriority, root=dep_root,
2321 collapsed_parent=pkg, collapsed_priority=dep_priority)
2324 if not atom.blocker and \
2325 not recurse_satisfied and \
2326 mypriority.satisfied and \
2327 mypriority.satisfied.visible and \
2328 dep.child is not None and \
2329 not dep.child.installed and \
2330 self._dynamic_config._slot_pkg_map[dep.child.root].get(
2331 dep.child.slot_atom) is None:
2334 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
2335 except InvalidDependString:
2336 if not dep.child.installed:
2342 self._dynamic_config._ignored_deps.append(dep)
2345 if dep_priority.ignored and \
2346 not self._dynamic_config._traverse_ignored_deps:
2347 if is_virt and dep.child is not None:
2348 traversed_virt_pkgs.add(dep.child)
2350 self._dynamic_config._ignored_deps.append(dep)
2352 if not self._add_dep(dep,
2353 allow_unsatisfied=allow_unsatisfied):
2355 if is_virt and dep.child is not None:
2356 traversed_virt_pkgs.add(dep.child)
2359 writemsg_level("\nExiting... %s\n" % (pkg,),
2360 noiselevel=-1, level=logging.DEBUG)
# Generator: map each atom to a selected package, collapse redundant
# selections via a bipartite digraph (atom <-> package), and yield
# (atom, package) pairs ordered so that conflict-prone atoms come first.
# NOTE(review): this listing is elided — loop headers, initializers
# (atom_pkg_map, cp_atoms, abi_atoms, conflict_atoms, normal_atoms) and
# several yield/continue lines are missing; verify against upstream.
2364 def _minimize_children(self, parent, priority, root_config, atoms):
2366 Selects packages to satisfy the given atoms, and minimizes the
2367 number of selected packages. This serves to identify and eliminate
2368 redundant package selections when multiple atoms happen to specify
2378 dep_pkg, existing_node = self._select_package(
2379 root_config.root, atom)
2383 atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
2385 if len(atom_pkg_map) < 2:
2386 for item in atom_pkg_map.items():
2392 for atom, pkg in atom_pkg_map.items():
2393 pkg_atom_map.setdefault(pkg, set()).add(atom)
2394 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
2396 for pkgs in cp_pkg_map.values():
2399 for atom in pkg_atom_map[pkg]:
2403 # Use a digraph to identify and eliminate any
2404 # redundant package selections.
2405 atom_pkg_graph = digraph()
2408 for atom in pkg_atom_map[pkg1]:
2410 atom_pkg_graph.add(pkg1, atom)
2411 atom_set = InternalPackageSet(initial_atoms=(atom,),
# An atom edge is added for every other package it also matches.
2416 if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
2417 atom_pkg_graph.add(pkg2, atom)
# A package is redundant only if every atom it satisfies is also
# satisfied by some other selected package.
2420 eliminate_pkg = True
2421 for atom in atom_pkg_graph.parent_nodes(pkg):
2422 if len(atom_pkg_graph.child_nodes(atom)) < 2:
2423 eliminate_pkg = False
2426 atom_pkg_graph.remove(pkg)
2428 # Yield ~, =*, < and <= atoms first, since those are more likely to
2429 # cause slot conflicts, and we want those atoms to be displayed
2430 # in the resulting slot conflict message (see bug #291142).
2431 # Give similar treatment to slot/sub-slot atoms.
2435 for atom in cp_atoms:
2436 if atom.slot_operator_built:
2437 abi_atoms.append(atom)
2440 for child_pkg in atom_pkg_graph.child_nodes(atom):
2441 existing_node, matches = \
2442 self._check_slot_conflict(child_pkg, atom)
2443 if existing_node and not matches:
2447 conflict_atoms.append(atom)
2449 normal_atoms.append(atom)
2451 for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
2452 child_pkgs = atom_pkg_graph.child_nodes(atom)
2453 # if more than one child, yield highest version
2454 if len(child_pkgs) > 1:
2456 yield (atom, child_pkgs[-1])
# Generator: walk a parsed dep structure, deferring ||-groups and
# virtual/* atoms onto the disjunctive stack (via _queue_disjunction)
# while yielding everything else for immediate processing.
# NOTE(review): elided listing — the `yield y` / `yield x` lines and the
# else-branches are missing from this copy; verify against upstream.
2458 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2460 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
2461 Yields non-disjunctive deps. Raises InvalidDependString when
2464 for x in dep_struct:
2465 if isinstance(x, list):
# A nested list starting with "||" is an any-of group: defer it whole.
2466 if x and x[0] == "||":
2467 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2469 for y in self._queue_disjunctive_deps(
2470 pkg, dep_root, dep_priority, x):
2473 # Note: Eventually this will check for PROPERTIES=virtual
2474 # or whatever other metadata gets implemented for this
# Old-style virtual atoms are also treated as disjunctive.
2476 if x.cp.startswith('virtual/'):
2477 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2481 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2482 self._dynamic_config._dep_disjunctive_stack.append(
2483 (pkg, dep_root, dep_priority, dep_struct))
# Pop one deferred disjunctive dep and process it through
# _add_pkg_dep_string, refilling the main dep stack.
# NOTE(review): elided listing — the failure-handling body of the `if not`
# (and presumably a `return`/raise) is missing from this copy.
2485 def _pop_disjunction(self, allow_unsatisfied):
2487 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
2488 populate self._dynamic_config._dep_stack.
2490 pkg, dep_root, dep_priority, dep_struct = \
2491 self._dynamic_config._dep_disjunctive_stack.pop()
2492 if not self._add_pkg_dep_string(
2493 pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
def _priority(self, **kwargs):
    """Build a dep priority object appropriate for the current action.

    Uses UnmergeDepPriority while a "remove" action is in progress and
    DepPriority otherwise; kwargs are forwarded to the constructor.

    Fixes a defect in the given text: the assignment of DepPriority was
    unconditional (the `else:` was lost), which made the remove branch dead.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
# Expand a category-less atom into candidate atoms by scanning every
# configured package database for categories that contain the package name.
# NOTE(review): elided listing — the `categories`/`deps` initializers,
# the loop's category-collection line, and the final `return deps` are
# missing from this copy; verify against upstream.
2504 def _dep_expand(self, root_config, atom_without_category):
2506 @param root_config: a root config instance
2507 @type root_config: RootConfig
2508 @param atom_without_category: an atom without a category component
2509 @type atom_without_category: String
2511 @return: a list of atoms containing categories (possibly empty)
# Parse out the package-name part by temporarily using a "null" category.
2513 null_cp = portage.dep_getkey(insert_category_into_atom(
2514 atom_without_category, "null"))
2515 cat, atom_pn = portage.catsplit(null_cp)
2517 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
2519 for db, pkg_type, built, installed, db_keys in dbs:
2520 for cat in db.categories:
2521 if db.cp_list("%s/%s" % (cat, atom_pn)):
# Rebuild a full atom for every category that matched.
2525 for cat in categories:
2526 deps.append(Atom(insert_category_into_atom(
2527 atom_without_category, cat), allow_repo=True))
# Report whether any non-installed db provides atom_cp — i.e. whether a
# new-style virtual (or any buildable/installable package) exists for it.
# NOTE(review): elided listing — the result variable initialization and
# the return statement are missing from this copy.
2530 def _have_new_virt(self, root, atom_cp):
2532 for db, pkg_type, built, installed, db_keys in \
2533 self._dynamic_config._filtered_trees[root]["dbs"]:
2534 if db.cp_list(atom_cp):
# Generator over the argument atoms (from the user's sets/args) that are
# satisfied by pkg, skipping atoms superseded by a new-style virtual and
# atoms better satisfied by a higher-slot visible package.
# NOTE(review): elided listing — `continue`/`break` lines, the
# `visible_pkgs =` binding, the `higher_slot = None` init, and the final
# `yield arg` are missing from this copy; verify against upstream.
2539 def _iter_atoms_for_pkg(self, pkg):
2540 depgraph_sets = self._dynamic_config.sets[pkg.root]
2541 atom_arg_map = depgraph_sets.atom_arg_map
2542 for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
# Skip old-style virtual atoms when a new-style virtual now exists.
2543 if atom.cp != pkg.cp and \
2544 self._have_new_virt(pkg.root, atom.cp):
2547 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
2548 visible_pkgs.reverse() # descending order
2550 for visible_pkg in visible_pkgs:
2551 if visible_pkg.cp != atom.cp:
2553 if pkg >= visible_pkg:
2554 # This is descending order, and we're not
2555 # interested in any versions <= pkg given.
2557 if pkg.slot_atom != visible_pkg.slot_atom:
2558 higher_slot = visible_pkg
2560 if higher_slot is not None:
2562 for arg in atom_arg_map[(atom, pkg.root)]:
2563 if isinstance(arg, PackageArg) and \
# Top-level argument processing: classify each command-line token (.tbz2,
# .ebuild, absolute/relative path, set, or atom) into args, handle greedy
# slot expansion for --update, record favorites, and hand off to _resolve.
# NOTE(review): this listing is elided/garbled — original line numbers are
# baked into each line, indentation is lost, and many statements are
# missing (initializers for args/lookup_owners/myfavorites, several
# continue/return lines, try/except headers). Do not treat as runnable;
# verify against upstream portage depgraph.py before editing.
2568 def select_files(self, myfiles):
2569 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
2570 self._dynamic_config._initial_arg_list and call self._resolve to create the
2571 appropriate depgraph and return a favorite list."""
2573 debug = "--debug" in self._frozen_config.myopts
2574 root_config = self._frozen_config.roots[self._frozen_config.target_root]
2575 sets = root_config.sets
2576 depgraph_sets = self._dynamic_config.sets[root_config.root]
2578 eroot = root_config.root
2579 root = root_config.settings['ROOT']
2580 vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
2581 real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
2582 portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
2583 bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
2584 pkgsettings = self._frozen_config.pkgsettings[eroot]
2586 onlydeps = "--onlydeps" in self._frozen_config.myopts
# --- Per-argument classification loop (its `for x in myfiles:` header is
# missing from this copy) ---
2589 ext = os.path.splitext(x)[1]
# Binary package argument: locate the tbz2 under PKGDIR if needed.
2591 if not os.path.exists(x):
2593 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2594 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2595 elif os.path.exists(
2596 os.path.join(pkgsettings["PKGDIR"], x)):
2597 x = os.path.join(pkgsettings["PKGDIR"], x)
2599 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
2600 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
2601 return 0, myfavorites
2602 mytbz2=portage.xpak.tbz2(x)
2604 cat = mytbz2.getfile("CATEGORY")
2606 cat = _unicode_decode(cat.strip(),
2607 encoding=_encodings['repo.content'])
# Strip the ".tbz2" suffix to derive cat/pkg-version.
2608 mykey = cat + "/" + os.path.basename(x)[:-5]
2611 writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
2612 self._dynamic_config._skip_restart = True
2613 return 0, myfavorites
2614 elif os.path.realpath(x) != \
2615 os.path.realpath(bindb.bintree.getname(mykey)):
2616 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
2617 self._dynamic_config._skip_restart = True
2618 return 0, myfavorites
2620 pkg = self._pkg(mykey, "binary", root_config,
2622 args.append(PackageArg(arg=x, package=pkg,
2623 root_config=root_config))
# Raw ebuild argument: validate its location inside a portage tree.
2624 elif ext==".ebuild":
2625 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2626 pkgdir = os.path.dirname(ebuild_path)
2627 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2628 cp = pkgdir[len(tree_root)+1:]
2629 error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
2630 "hierarchy or does not exist\n") % x
2631 if not portage.isvalidatom(cp):
2632 writemsg(error_msg, noiselevel=-1)
2633 return 0, myfavorites
2634 cat = portage.catsplit(cp)[0]
2635 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2636 if not portage.isvalidatom("="+mykey):
2637 writemsg(error_msg, noiselevel=-1)
2638 return 0, myfavorites
2639 ebuild_path = portdb.findname(mykey)
2641 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2642 cp, os.path.basename(ebuild_path)):
2643 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
2644 self._dynamic_config._skip_restart = True
2645 return 0, myfavorites
2646 if mykey not in portdb.xmatch(
2647 "match-visible", portage.cpv_getkey(mykey)):
2648 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
2649 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
2650 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
2651 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
2654 writemsg(error_msg, noiselevel=-1)
2655 return 0, myfavorites
2656 pkg = self._pkg(mykey, "ebuild", root_config,
2657 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
2658 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
2659 args.append(PackageArg(arg=x, package=pkg,
2660 root_config=root_config))
# Filesystem path arguments: resolve them to owning packages later.
2661 elif x.startswith(os.path.sep):
2662 if not x.startswith(eroot):
2663 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2664 " $EROOT.\n") % x, noiselevel=-1)
2665 self._dynamic_config._skip_restart = True
2667 # Queue these up since it's most efficient to handle
2668 # multiple files in a single iter_owners() call.
2669 lookup_owners.append(x)
2670 elif x.startswith("." + os.sep) or \
2671 x.startswith(".." + os.sep):
2672 f = os.path.abspath(x)
2673 if not f.startswith(eroot):
2674 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
2675 " $EROOT.\n") % (f, x), noiselevel=-1)
2676 self._dynamic_config._skip_restart = True
2678 lookup_owners.append(f)
# Set arguments (@set, plus bare "system"/"world").
2680 if x in ("system", "world"):
2682 if x.startswith(SETPREFIX):
2683 s = x[len(SETPREFIX):]
2685 raise portage.exception.PackageSetNotFound(s)
2686 if s in depgraph_sets.sets:
2689 depgraph_sets.sets[s] = pset
2690 args.append(SetArg(arg=x, pset=pset,
2691 root_config=root_config))
# Everything else must be a package atom.
2693 if not is_valid_package_atom(x, allow_repo=True):
2694 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2696 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2697 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2698 self._dynamic_config._skip_restart = True
2700 # Don't expand categories or old-style virtuals here unless
2701 # necessary. Expansion of old-style virtuals here causes at
2702 # least the following problems:
2703 # 1) It's more difficult to determine which set(s) an atom
2704 # came from, if any.
2705 # 2) It takes away freedom from the resolver to choose other
2706 # possible expansions when necessary.
2707 if "/" in x.split(":")[0]:
2708 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2709 root_config=root_config))
# Category-less atom: expand, then disambiguate using installed packages
# and non-virtual preference before giving up as ambiguous.
2711 expanded_atoms = self._dep_expand(root_config, x)
2712 installed_cp_set = set()
2713 for atom in expanded_atoms:
2714 if vardb.cp_list(atom.cp):
2715 installed_cp_set.add(atom.cp)
2717 if len(installed_cp_set) > 1:
2718 non_virtual_cps = set()
2719 for atom_cp in installed_cp_set:
2720 if not atom_cp.startswith("virtual/"):
2721 non_virtual_cps.add(atom_cp)
2722 if len(non_virtual_cps) == 1:
2723 installed_cp_set = non_virtual_cps
2725 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2726 installed_cp = next(iter(installed_cp_set))
2727 for atom in expanded_atoms:
2728 if atom.cp == installed_cp:
2730 for pkg in self._iter_match_pkgs_any(
2731 root_config, atom.without_use,
2733 if not pkg.installed:
2737 expanded_atoms = [atom]
2740 # If a non-virtual package and one or more virtual packages
2741 # are in expanded_atoms, use the non-virtual package.
2742 if len(expanded_atoms) > 1:
2743 number_of_virtuals = 0
2744 for expanded_atom in expanded_atoms:
2745 if expanded_atom.cp.startswith("virtual/"):
2746 number_of_virtuals += 1
2748 candidate = expanded_atom
2749 if len(expanded_atoms) - number_of_virtuals == 1:
2750 expanded_atoms = [ candidate ]
2752 if len(expanded_atoms) > 1:
2753 writemsg("\n\n", noiselevel=-1)
2754 ambiguous_package_name(x, expanded_atoms, root_config,
2755 self._frozen_config.spinner, self._frozen_config.myopts)
2756 self._dynamic_config._skip_restart = True
2757 return False, myfavorites
2759 atom = expanded_atoms[0]
2761 null_atom = Atom(insert_category_into_atom(x, "null"),
2763 cat, atom_pn = portage.catsplit(null_atom.cp)
2764 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2766 # Allow the depgraph to choose which virtual.
2767 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2772 if atom.use and atom.use.conditional:
2774 ("\n\n!!! '%s' contains a conditional " + \
2775 "which is not allowed.\n") % (x,), noiselevel=-1)
2776 writemsg("!!! Please check ebuild(5) for full details.\n")
2777 self._dynamic_config._skip_restart = True
2780 args.append(AtomArg(arg=x, atom=atom,
2781 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages ---
2785 search_for_multiple = False
2786 if len(lookup_owners) > 1:
2787 search_for_multiple = True
2789 for x in lookup_owners:
2790 if not search_for_multiple and os.path.isdir(x):
2791 search_for_multiple = True
2792 relative_paths.append(x[len(root)-1:])
2795 for pkg, relative_path in \
2796 real_vardb._owners.iter_owners(relative_paths):
2797 owners.add(pkg.mycpv)
2798 if not search_for_multiple:
2802 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2803 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2804 self._dynamic_config._skip_restart = True
2808 pkg = vardb._pkg_str(cpv, None)
2809 atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
2810 args.append(AtomArg(arg=atom, atom=atom,
2811 root_config=root_config))
# --- Greedy slot expansion for --update ---
2813 if "--update" in self._frozen_config.myopts:
2814 # In some cases, the greedy slots behavior can pull in a slot that
2815 # the user would want to uninstall due to it being blocked by a
2816 # newer version in a different slot. Therefore, it's necessary to
2817 # detect and discard any that should be uninstalled. Each time
2818 # that arguments are updated, package selections are repeated in
2819 # order to ensure consistency with the current arguments:
2821 # 1) Initialize args
2822 # 2) Select packages and generate initial greedy atoms
2823 # 3) Update args with greedy atoms
2824 # 4) Select packages and generate greedy atoms again, while
2825 # accounting for any blockers between selected packages
2826 # 5) Update args with revised greedy atoms
2828 self._set_args(args)
2831 greedy_args.append(arg)
2832 if not isinstance(arg, AtomArg):
2834 for atom in self._greedy_slots(arg.root_config, arg.atom):
2836 AtomArg(arg=arg.arg, atom=atom,
2837 root_config=arg.root_config))
2839 self._set_args(greedy_args)
2842 # Revise greedy atoms, accounting for any blockers
2843 # between selected packages.
2844 revised_greedy_args = []
2846 revised_greedy_args.append(arg)
2847 if not isinstance(arg, AtomArg):
2849 for atom in self._greedy_slots(arg.root_config, arg.atom,
2850 blocker_lookahead=True):
2851 revised_greedy_args.append(
2852 AtomArg(arg=arg.arg, atom=atom,
2853 root_config=arg.root_config))
2854 args = revised_greedy_args
2855 del revised_greedy_args
2857 args.extend(self._gen_reinstall_sets())
2858 self._set_args(args)
# Favorites: atoms for Atom/Package args, set names for non-internal sets.
2860 myfavorites = set(myfavorites)
2862 if isinstance(arg, (AtomArg, PackageArg)):
2863 myfavorites.add(arg.atom)
2864 elif isinstance(arg, SetArg):
2865 if not arg.internal:
2866 myfavorites.add(arg.arg)
2867 myfavorites = list(myfavorites)
2870 portage.writemsg("\n", noiselevel=-1)
2871 # Order needs to be preserved since a feature of --nodeps
2872 # is to allow the user to force a specific merge order.
2873 self._dynamic_config._initial_arg_list = args[:]
2875 return self._resolve(myfavorites)
# Generator: package the rebuild/reinstall/slot-operator-replace atom lists
# into internal SetArg arguments (one per (root, set_name) pair) so they
# participate in resolution like ordinary arguments.
# NOTE(review): elided listing — the `atom_list = []` and `set_dict = {}`
# initializers (and likely a reset_depth=False kwarg) are missing from
# this copy; verify against upstream.
2877 def _gen_reinstall_sets(self):
2880 for root, atom in self._rebuild.rebuild_list:
2881 atom_list.append((root, '__auto_rebuild__', atom))
2882 for root, atom in self._rebuild.reinstall_list:
2883 atom_list.append((root, '__auto_reinstall__', atom))
2884 for root, atom in self._dynamic_config._slot_operator_replace_installed:
2885 atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
# Group atoms by (root, set name) before emitting one SetArg per group.
2888 for root, set_name, atom in atom_list:
2889 set_dict.setdefault((root, set_name), []).append(atom)
2891 for (root, set_name), atoms in set_dict.items():
2892 yield SetArg(arg=(SETPREFIX + set_name),
2893 # Set reset_depth=False here, since we don't want these
2894 # special sets to interact with depth calculations (see
2895 # the emerge --deep=DEPTH option), though we want them
2896 # to behave like normal arguments in most other respects.
2897 pset=InternalPackageSet(initial_atoms=atoms),
2898 force_reinstall=True,
2901 root_config=self._frozen_config.roots[root])
2903 def _resolve(self, myfavorites):
2904 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2905 call self._creategraph to process theier deps and return
2907 debug = "--debug" in self._frozen_config.myopts
2908 onlydeps = "--onlydeps" in self._frozen_config.myopts
2909 myroot = self._frozen_config.target_root
2910 pkgsettings = self._frozen_config.pkgsettings[myroot]
2911 pprovideddict = pkgsettings.pprovideddict
2912 virtuals = pkgsettings.getvirtuals()
2913 args = self._dynamic_config._initial_arg_list[:]
2915 for arg in self._expand_set_args(args, add_to_digraph=True):
2916 for atom in arg.pset.getAtoms():
2917 self._spinner_update()
2918 dep = Dependency(atom=atom, onlydeps=onlydeps,
2919 root=myroot, parent=arg)
2921 pprovided = pprovideddict.get(atom.cp)
2922 if pprovided and portage.match_from_list(atom, pprovided):
2923 # A provided package has been specified on the command line.
2924 self._dynamic_config._pprovided_args.append((arg, atom))
2926 if isinstance(arg, PackageArg):
2927 if not self._add_pkg(arg.package, dep) or \
2928 not self._create_graph():
2929 if not self.need_restart():
2930 sys.stderr.write(("\n\n!!! Problem " + \
2931 "resolving dependencies for %s\n") % \
2933 return 0, myfavorites
2936 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2937 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2938 pkg, existing_node = self._select_package(
2939 myroot, atom, onlydeps=onlydeps)
2941 pprovided_match = False
2942 for virt_choice in virtuals.get(atom.cp, []):
2943 expanded_atom = portage.dep.Atom(
2944 atom.replace(atom.cp, virt_choice.cp, 1))
2945 pprovided = pprovideddict.get(expanded_atom.cp)
2947 portage.match_from_list(expanded_atom, pprovided):
2948 # A provided package has been
2949 # specified on the command line.
2950 self._dynamic_config._pprovided_args.append((arg, atom))
2951 pprovided_match = True
2956 if not (isinstance(arg, SetArg) and \
2957 arg.name in ("selected", "system", "world")):
2958 self._dynamic_config._unsatisfied_deps_for_display.append(
2959 ((myroot, atom), {"myparent" : arg}))
2960 return 0, myfavorites
2962 self._dynamic_config._missing_args.append((arg, atom))
2964 if atom.cp != pkg.cp:
2965 # For old-style virtuals, we need to repeat the
2966 # package.provided check against the selected package.
2967 expanded_atom = atom.replace(atom.cp, pkg.cp)
2968 pprovided = pprovideddict.get(pkg.cp)
2970 portage.match_from_list(expanded_atom, pprovided):
2971 # A provided package has been
2972 # specified on the command line.
2973 self._dynamic_config._pprovided_args.append((arg, atom))
2975 if pkg.installed and \
2976 "selective" not in self._dynamic_config.myparams and \
2977 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2978 pkg, modified_use=self._pkg_use_enabled(pkg)):
2979 self._dynamic_config._unsatisfied_deps_for_display.append(
2980 ((myroot, atom), {"myparent" : arg}))
2981 # Previous behavior was to bail out in this case, but
2982 # since the dep is satisfied by the installed package,
2983 # it's more friendly to continue building the graph
2984 # and just show a warning message. Therefore, only bail
2985 # out here if the atom is not from either the system or
2987 if not (isinstance(arg, SetArg) and \
2988 arg.name in ("selected", "system", "world")):
2989 return 0, myfavorites
2991 # Add the selected package to the graph as soon as possible
2992 # so that later dep_check() calls can use it as feedback
2993 # for making more consistent atom selections.
2994 if not self._add_pkg(pkg, dep):
2995 if self.need_restart():
2997 elif isinstance(arg, SetArg):
2998 writemsg(("\n\n!!! Problem resolving " + \
2999 "dependencies for %s from %s\n") % \
3000 (atom, arg.arg), noiselevel=-1)
3002 writemsg(("\n\n!!! Problem resolving " + \
3003 "dependencies for %s\n") % \
3004 (atom,), noiselevel=-1)
3005 return 0, myfavorites
3007 except SystemExit as e:
3008 raise # Needed else can't exit
3009 except Exception as e:
3010 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
3011 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
3014 # Now that the root packages have been added to the graph,
3015 # process the dependencies.
3016 if not self._create_graph():
3017 return 0, myfavorites
3021 except self._unknown_internal_error:
3022 return False, myfavorites
3024 if (self._dynamic_config._slot_collision_info and
3025 not self._accept_blocker_conflicts()) or \
3026 (self._dynamic_config._allow_backtracking and
3027 "slot conflict" in self._dynamic_config._backtrack_infos):
3028 return False, myfavorites
3030 if self._rebuild.trigger_rebuilds():
3031 backtrack_infos = self._dynamic_config._backtrack_infos
3032 config = backtrack_infos.setdefault("config", {})
3033 config["rebuild_list"] = self._rebuild.rebuild_list
3034 config["reinstall_list"] = self._rebuild.reinstall_list
3035 self._dynamic_config._need_restart = True
3036 return False, myfavorites
3038 if "config" in self._dynamic_config._backtrack_infos and \
3039 ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
3040 "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
3041 self.need_restart():
3042 return False, myfavorites
3044 if not self._dynamic_config._prune_rebuilds and \
3045 self._dynamic_config._slot_operator_replace_installed and \
3046 self._get_missed_updates():
3047 # When there are missed updates, we might have triggered
3048 # some unnecessary rebuilds (see bug #439688). So, prune
3049 # all the rebuilds and backtrack with the problematic
3050 # updates masked. The next backtrack run should pull in
3051 # any rebuilds that are really needed, and this
3052 # prune_rebuilds path should never be entered more than
3053 # once in a series of backtracking nodes (in order to
3054 # avoid a backtracking loop).
3055 backtrack_infos = self._dynamic_config._backtrack_infos
3056 config = backtrack_infos.setdefault("config", {})
3057 config["prune_rebuilds"] = True
3058 self._dynamic_config._need_restart = True
3059 return False, myfavorites
3061 # Any failures except those due to autounmask *alone* should return
3062 # before this point, since the success_without_autounmask flag that's
3063 # set below is reserved for cases where there are *zero* other
3064 # problems. For reference, see backtrack_depgraph, where it skips the
3065 # get_best_run() call when success_without_autounmask is True.
3067 digraph_nodes = self._dynamic_config.digraph.nodes
3069 if any(x in digraph_nodes for x in
3070 self._dynamic_config._needed_unstable_keywords) or \
3071 any(x in digraph_nodes for x in
3072 self._dynamic_config._needed_p_mask_changes) or \
3073 any(x in digraph_nodes for x in
3074 self._dynamic_config._needed_use_config_changes) or \
3075 any(x in digraph_nodes for x in
3076 self._dynamic_config._needed_license_changes) :
3077 #We failed if the user needs to change the configuration
3078 self._dynamic_config._success_without_autounmask = True
3079 return False, myfavorites
3081 # We're true here unless we are missing binaries.
3082 return (True, myfavorites)
def _set_args(self, args):
	"""
	Create the "__non_set_args__" package set from atoms and packages given as
	arguments. This method can be called multiple times if necessary.
	The package selection cache is automatically invalidated, since
	arguments influence package selections.
	"""

	# Per-root accumulators for atoms that came from named sets vs.
	# plain command-line arguments.
	set_atoms = {}
	non_set_atoms = {}
	# Reset all per-root argument state first, so repeated calls start
	# from a clean slate.
	for root in self._dynamic_config.sets:
		depgraph_sets = self._dynamic_config.sets[root]
		depgraph_sets.sets.setdefault('__non_set_args__',
			InternalPackageSet(allow_repo=True)).clear()
		depgraph_sets.atoms.clear()
		depgraph_sets.atom_arg_map.clear()
		set_atoms[root] = []
		non_set_atoms[root] = []

	# We don't add set args to the digraph here since that
	# happens at a later stage and we don't want to make
	# any state changes here that aren't reversed by a
	# another call to this method.
	for arg in self._expand_set_args(args, add_to_digraph=False):
		atom_arg_map = self._dynamic_config.sets[
			arg.root_config.root].atom_arg_map
		if isinstance(arg, SetArg):
			atom_group = set_atoms[arg.root_config.root]
		else:
			atom_group = non_set_atoms[arg.root_config.root]

		for atom in arg.pset.getAtoms():
			atom_group.append(atom)
			atom_key = (atom, arg.root_config.root)
			refs = atom_arg_map.get(atom_key)
			if refs is None:
				refs = []
				atom_arg_map[atom_key] = refs
			if arg not in refs:
				refs.append(arg)

	# Publish the collected atoms to the per-root depgraph sets.
	for root in self._dynamic_config.sets:
		depgraph_sets = self._dynamic_config.sets[root]
		depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
			non_set_atoms.get(root, [])))
		depgraph_sets.sets['__non_set_args__'].update(
			non_set_atoms.get(root, []))

	# Invalidate the package selection cache, since
	# arguments influence package selections.
	self._dynamic_config._highest_pkg_cache.clear()
	for trees in self._dynamic_config._filtered_trees.values():
		trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
	"""
	Return a list of slot atoms corresponding to installed slots that
	differ from the slot of the highest visible match. When
	blocker_lookahead is True, slot atoms that would trigger a blocker
	conflict are automatically discarded, potentially allowing automatic
	uninstallation of older slots when appropriate.
	"""
	highest_pkg, in_graph = self._select_package(root_config.root, atom)
	if highest_pkg is None:
		return []
	vardb = root_config.trees["vartree"].dbapi
	# Collect the set of installed slots for this cp.
	slots = set()
	for cpv in vardb.match(atom):
		# don't mix new virtuals with old virtuals
		pkg = vardb._pkg_str(cpv, None)
		if pkg.cp == highest_pkg.cp:
			slots.add(pkg.slot)

	slots.add(highest_pkg.slot)
	if len(slots) == 1:
		# Only the highest match's slot exists; nothing greedy to add.
		return []
	greedy_pkgs = []
	slots.remove(highest_pkg.slot)
	while slots:
		slot = slots.pop()
		slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
		pkg, in_graph = self._select_package(root_config.root, slot_atom)
		if pkg is not None and \
			pkg.cp == highest_pkg.cp and pkg < highest_pkg:
			greedy_pkgs.append(pkg)
	if not greedy_pkgs:
		return []
	if not blocker_lookahead:
		return [pkg.slot_atom for pkg in greedy_pkgs]

	# Map each candidate (and the highest match) to its blocker atoms,
	# so that conflicting slot candidates can be filtered out below.
	blockers = {}
	blocker_dep_keys = Package._dep_keys
	for pkg in greedy_pkgs + [highest_pkg]:
		dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
		try:
			selected_atoms = self._select_atoms(
				pkg.root, dep_str, self._pkg_use_enabled(pkg),
				parent=pkg, strict=True)
		except portage.exception.InvalidDependString:
			continue
		blocker_atoms = []
		for atoms in selected_atoms.values():
			blocker_atoms.extend(x for x in atoms if x.blocker)
		blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

	if highest_pkg not in blockers:
		# The highest match itself has invalid deps; bail out.
		return []

	# filter packages with invalid deps
	greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

	# filter packages that conflict with highest_pkg
	greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
		(blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
		blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]

	if not greedy_pkgs:
		return []

	# If two packages conflict, discard the lower version.
	discard_pkgs = set()
	greedy_pkgs.sort(reverse=True)
	for i in range(len(greedy_pkgs) - 1):
		pkg1 = greedy_pkgs[i]
		if pkg1 in discard_pkgs:
			continue
		for j in range(i + 1, len(greedy_pkgs)):
			pkg2 = greedy_pkgs[j]
			if pkg2 in discard_pkgs:
				continue
			if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
				blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
				# pkg1 sorts higher than pkg2, so keep pkg1.
				discard_pkgs.add(pkg2)

	return [pkg.slot_atom for pkg in greedy_pkgs \
		if pkg not in discard_pkgs]
def _select_atoms_from_graph(self, *pargs, **kwargs):
	"""
	Prefer atoms matching packages that have already been
	added to the graph or those that are installed and have
	not been scheduled for replacement.
	"""
	# Route the query through the graph-backed trees so that
	# dep_check sees already-selected packages first.
	kwargs["trees"] = self._dynamic_config._graph_trees
	return self._select_atoms_highest_available(*pargs,
		**portage._native_kwargs(kwargs))
def _select_atoms_highest_available(self, root, depstring,
	myuse=None, parent=None, strict=True, trees=None, priority=None):
	"""This will raise InvalidDependString if necessary. If trees is
	None then self._dynamic_config._filtered_trees is used."""

	if not isinstance(depstring, list):
		eapi = None
		is_valid_flag = None
		if parent is not None:
			eapi = parent.eapi
			if not parent.installed:
				is_valid_flag = parent.iuse.is_valid_flag
		depstring = portage.dep.use_reduce(depstring,
			uselist=myuse, opconvert=True, token_class=Atom,
			is_valid_flag=is_valid_flag, eapi=eapi)

	if (self._dynamic_config.myparams.get(
		"ignore_built_slot_operator_deps", "n") == "y" and
		parent and parent.built):
		ignore_built_slot_operator_deps(depstring)

	pkgsettings = self._frozen_config.pkgsettings[root]
	if trees is None:
		trees = self._dynamic_config._filtered_trees
	mytrees = trees[root]
	atom_graph = digraph()

	# Temporarily disable autounmask so that || preferences
	# account for masking and USE settings.
	_autounmask_backup = self._dynamic_config._autounmask
	self._dynamic_config._autounmask = False
	# backup state for restoration, in case of recursive
	# calls to this method
	backup_state = mytrees.copy()
	try:
		# clear state from previous call, in case this
		# call is recursive (we have a backup, that we
		# will use to restore it later)
		mytrees.pop("pkg_use_enabled", None)
		mytrees.pop("parent", None)
		mytrees.pop("atom_graph", None)
		mytrees.pop("priority", None)

		mytrees["pkg_use_enabled"] = self._pkg_use_enabled
		if parent is not None:
			mytrees["parent"] = parent
			mytrees["atom_graph"] = atom_graph
		if priority is not None:
			mytrees["priority"] = priority

		mycheck = portage.dep_check(depstring, None,
			pkgsettings, myuse=myuse,
			myroot=root, trees=trees)
	finally:
		# restore state
		self._dynamic_config._autounmask = _autounmask_backup
		mytrees.pop("pkg_use_enabled", None)
		mytrees.pop("parent", None)
		mytrees.pop("atom_graph", None)
		mytrees.pop("priority", None)
		mytrees.update(backup_state)
	if not mycheck[0]:
		raise portage.exception.InvalidDependString(mycheck[1])
	if parent is None:
		selected_atoms = mycheck[1]
	elif parent not in atom_graph:
		selected_atoms = {parent : mycheck[1]}
	else:
		# Recursively traversed virtual dependencies, and their
		# direct dependencies, are considered to have the same
		# depth as direct dependencies.
		if parent.depth is None:
			virt_depth = None
		else:
			virt_depth = parent.depth + 1
		chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
		selected_atoms = OrderedDict()
		node_stack = [(parent, None, None)]
		traversed_nodes = set()
		while node_stack:
			node, node_parent, parent_atom = node_stack.pop()
			traversed_nodes.add(node)
			if node is parent:
				# NOTE(review): root entry is keyed by the parent package
				# itself (matching the {parent: ...} case above) — verify
				# against upstream.
				k = node
			else:
				if node_parent is parent:
					if priority is None:
						node_priority = None
					else:
						node_priority = priority.copy()
				else:
					# virtuals only have runtime deps
					node_priority = self._priority(runtime=True)

				k = Dependency(atom=parent_atom,
					blocker=parent_atom.blocker, child=node,
					depth=virt_depth, parent=node_parent,
					priority=node_priority, root=node.root)

			child_atoms = []
			selected_atoms[k] = child_atoms
			for atom_node in atom_graph.child_nodes(node):
				child_atom = atom_node[0]
				if id(child_atom) not in chosen_atom_ids:
					continue
				child_atoms.append(child_atom)
				for child_node in atom_graph.child_nodes(atom_node):
					if child_node in traversed_nodes:
						continue
					if not portage.match_from_list(
						child_atom, [child_node]):
						# Typically this means that the atom
						# specifies USE deps that are unsatisfied
						# by the selected package. The caller will
						# record this as an unsatisfied dependency
						# when necessary.
						continue
					node_stack.append((child_node, node, child_atom))

	return selected_atoms
def _expand_virt_from_graph(self, root, atom):
	"""
	Generator that expands a virtual atom into the real atoms of the
	virtual package selected in the graph. Yields the atom unchanged
	when it does not resolve to a virtual/* package in the graph.
	"""
	if not isinstance(atom, Atom):
		atom = Atom(atom)
	graphdb = self._dynamic_config.mydbapi[root]
	match = graphdb.match_pkgs(atom)
	if not match:
		yield atom
		return
	pkg = match[-1]
	if not pkg.cpv.startswith("virtual/"):
		yield atom
		return
	try:
		rdepend = self._select_atoms_from_graph(
			pkg.root, pkg._metadata.get("RDEPEND", ""),
			myuse=self._pkg_use_enabled(pkg),
			parent=pkg, strict=False)
	except InvalidDependString as e:
		writemsg_level("!!! Invalid RDEPEND in " + \
			"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
			(pkg.root, pkg.cpv, e),
			noiselevel=-1, level=logging.ERROR)
		yield atom
		return

	for atoms in rdepend.values():
		for atom in atoms:
			if hasattr(atom, "_orig_atom"):
				# Ignore virtual atoms since we're only
				# interested in expanding the real atoms.
				continue
			yield atom
def _virt_deps_visible(self, pkg, ignore_use=False):
	"""
	Assumes pkg is a virtual package. Traverses virtual deps recursively
	and returns True if all deps are visible, False otherwise. This is
	useful for checking if it will be necessary to expand virtual slots,
	for cases like bug #382557.
	"""
	try:
		rdepend = self._select_atoms(
			pkg.root, pkg._metadata.get("RDEPEND", ""),
			myuse=self._pkg_use_enabled(pkg),
			parent=pkg, priority=self._priority(runtime=True))
	except InvalidDependString as e:
		if not pkg.installed:
			# An uninstalled package with broken RDEPEND is a hard error.
			raise
		writemsg_level("!!! Invalid RDEPEND in " + \
			"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
			(pkg.root, pkg.cpv, e),
			noiselevel=-1, level=logging.ERROR)
		return False

	for atoms in rdepend.values():
		for atom in atoms:
			if ignore_use:
				atom = atom.without_use
			pkg, existing = self._select_package(
				pkg.root, atom)
			if pkg is None or not self._pkg_visibility_check(pkg):
				return False

	return True
def _get_dep_chain(self, start_node, target_atom=None,
	unsatisfied_dependency=False):
	"""
	Returns a list of (atom, node_type) pairs that represent a dep chain.
	If target_atom is None, the first package shown is pkg's parent.
	If target_atom is not None the first package shown is pkg.
	If unsatisfied_dependency is True, the first parent is select who's
	dependency is not satisfied by 'pkg'. This is need for USE changes.
	(Does not support target_atom.)
	"""
	traversed_nodes = set()
	dep_chain = []
	node = start_node
	child = None
	all_parents = self._dynamic_config._parent_atoms
	graph = self._dynamic_config.digraph
	verbose_main_repo_display = "--verbose-main-repo-display" in \
		self._frozen_config.myopts

	def format_pkg(pkg):
		# Append the repo name unless it is the main repo and verbose
		# display is off.
		pkg_name = "%s" % (pkg.cpv,)
		if verbose_main_repo_display or pkg.repo != \
			pkg.root_config.settings.repositories.mainRepo().name:
			pkg_name += _repo_separator + pkg.repo
		return pkg_name

	if target_atom is not None and isinstance(node, Package):
		affecting_use = set()
		for dep_str in Package._dep_keys:
			try:
				affecting_use.update(extract_affecting_use(
					node._metadata[dep_str], target_atom,
					eapi=node.eapi))
			except InvalidDependString:
				if not node.installed:
					raise
		affecting_use.difference_update(node.use.mask, node.use.force)
		pkg_name = format_pkg(node)

		if affecting_use:
			usedep = []
			for flag in affecting_use:
				if flag in self._pkg_use_enabled(node):
					usedep.append(flag)
				else:
					usedep.append("-"+flag)
			pkg_name += "[%s]" % ",".join(usedep)

		dep_chain.append((pkg_name, node.type_name))

	# To build a dep chain for the given package we take
	# "random" parents form the digraph, except for the
	# first package, because we want a parent that forced
	# the corresponding change (i.e '>=foo-2', instead 'foo').

	traversed_nodes.add(start_node)

	start_node_parent_atoms = {}
	for ppkg, patom in all_parents.get(node, []):
		# Get a list of suitable atoms. For use deps
		# (aka unsatisfied_dependency is not None) we
		# need that the start_node doesn't match the atom.
		if not unsatisfied_dependency or \
			not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
			start_node_parent_atoms.setdefault(patom, []).append(ppkg)

	if start_node_parent_atoms:
		# If there are parents in all_parents then use one of them.
		# If not, then this package got pulled in by an Arg and
		# will be correctly handled by the code that handles later
		# packages in the dep chain.
		best_match = best_match_to_list(node.cpv, start_node_parent_atoms)

		child = node
		for ppkg in start_node_parent_atoms[best_match]:
			node = ppkg
			if ppkg in self._dynamic_config._initial_arg_list:
				# Stop if reached the top level of the dep chain.
				break

	while node is not None:
		traversed_nodes.add(node)

		if node not in graph:
			# The parent is not in the graph due to backtracking.
			break

		elif isinstance(node, DependencyArg):
			if graph.parent_nodes(node):
				node_type = "set"
			else:
				node_type = "argument"
			dep_chain.append(("%s" % (node,), node_type))

		elif node is not start_node:
			for ppkg, patom in all_parents[child]:
				if ppkg == node:
					if child is start_node and unsatisfied_dependency and \
						InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
						# This atom is satisfied by child, there must be another atom.
						continue
					atom = patom.unevaluated_atom
					break

			dep_strings = set()
			priorities = graph.nodes[node][0].get(child)
			if priorities is None:
				# This edge comes from _parent_atoms and was not added to
				# the graph, and _parent_atoms does not contain priorities.
				for k in Package._dep_keys:
					dep_strings.add(node._metadata[k])
			else:
				for priority in priorities:
					if priority.buildtime:
						for k in Package._buildtime_keys:
							dep_strings.add(node._metadata[k])
					if priority.runtime:
						dep_strings.add(node._metadata["RDEPEND"])
					if priority.runtime_post:
						dep_strings.add(node._metadata["PDEPEND"])

			affecting_use = set()
			for dep_str in dep_strings:
				try:
					affecting_use.update(extract_affecting_use(
						dep_str, atom, eapi=node.eapi))
				except InvalidDependString:
					if not node.installed:
						raise

			#Don't show flags as 'affecting' if the user can't change them,
			affecting_use.difference_update(node.use.mask, \
				node.use.force)

			pkg_name = format_pkg(node)
			if affecting_use:
				usedep = []
				for flag in affecting_use:
					if flag in self._pkg_use_enabled(node):
						usedep.append(flag)
					else:
						usedep.append("-"+flag)
				pkg_name += "[%s]" % ",".join(usedep)

			dep_chain.append((pkg_name, node.type_name))

		# When traversing to parents, prefer arguments over packages
		# since arguments are root nodes. Never traverse the same
		# package twice, in order to prevent an infinite loop.
		child = node
		selected_parent = None
		parent_arg = None
		parent_merge = None
		parent_unsatisfied = None

		for parent in self._dynamic_config.digraph.parent_nodes(node):
			if parent in traversed_nodes:
				continue
			if isinstance(parent, DependencyArg):
				parent_arg = parent
			else:
				if isinstance(parent, Package) and \
					parent.operation == "merge":
					parent_merge = parent
				if unsatisfied_dependency and node is start_node:
					# Make sure that pkg doesn't satisfy parent's dependency.
					# This ensures that we select the correct parent for use
					# flag changes.
					for ppkg, atom in all_parents[start_node]:
						if ppkg is parent:
							atom_set = InternalPackageSet(initial_atoms=(atom,))
							if not atom_set.findAtomForPackage(start_node):
								parent_unsatisfied = parent
							break
				else:
					selected_parent = parent

		if parent_unsatisfied is not None:
			selected_parent = parent_unsatisfied
		elif parent_merge is not None:
			# Prefer parent in the merge list (bug #354747).
			selected_parent = parent_merge
		elif parent_arg is not None:
			if self._dynamic_config.digraph.parent_nodes(parent_arg):
				selected_parent = parent_arg
			else:
				dep_chain.append(("%s" % (parent_arg,), "argument"))
				selected_parent = None

		node = selected_parent
	return dep_chain
def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
	"""
	Render the dep chain of pkg as a block of '# required by ...' comment
	lines, suitable for writing into config files.
	"""
	dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
	display_list = []
	for node, node_type in dep_chain:
		if node_type == "argument":
			display_list.append("required by %s (argument)" % node)
		else:
			display_list.append("required by %s" % node)

	msg = "# " + "\n# ".join(display_list) + "\n"
	return msg
3624 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
3625 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
3627 When check_backtrack=True, no output is produced and
3628 the method either returns or raises _backtrack_mask if
3629 a matching package has been masked by backtracking.
3631 backtrack_mask = False
3632 autounmask_broke_use_dep = False
3633 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
3635 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
3637 xinfo = '"%s"' % atom.unevaluated_atom
3640 if isinstance(myparent, AtomArg):
3641 xinfo = '"%s"' % (myparent,)
3642 # Discard null/ from failed cpv_expand category expansion.
3643 xinfo = xinfo.replace("null/", "")
3644 if root != self._frozen_config._running_root.root:
3645 xinfo = "%s for %s" % (xinfo, root)
3646 masked_packages = []
3648 missing_use_adjustable = set()
3649 required_use_unsatisfied = []
3650 masked_pkg_instances = set()
3651 have_eapi_mask = False
3652 pkgsettings = self._frozen_config.pkgsettings[root]
3653 root_config = self._frozen_config.roots[root]
3654 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3655 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3656 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
3657 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3658 for db, pkg_type, built, installed, db_keys in dbs:
3661 if hasattr(db, "xmatch"):
3662 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
3664 cpv_list = db.match(atom.without_use)
3666 if atom.repo is None and hasattr(db, "getRepositories"):
3667 repo_list = db.getRepositories()
3669 repo_list = [atom.repo]
3673 for cpv in cpv_list:
3674 for repo in repo_list:
3675 if not db.cpv_exists(cpv, myrepo=repo):
3678 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
3679 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
3680 if metadata is not None and \
3681 portage.eapi_is_supported(metadata["EAPI"]):
3683 repo = metadata.get('repository')
3684 pkg = self._pkg(cpv, pkg_type, root_config,
3685 installed=installed, myrepo=repo)
3686 # pkg._metadata contains calculated USE for ebuilds,
3687 # required later for getMissingLicenses.
3688 metadata = pkg._metadata
3690 # Avoid doing any operations with packages that
3691 # have invalid metadata. It would be unsafe at
3692 # least because it could trigger unhandled
3693 # exceptions in places like check_required_use().
3694 masked_packages.append(
3695 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3697 if not atom_set.findAtomForPackage(pkg,
3698 modified_use=self._pkg_use_enabled(pkg)):
3700 if pkg in self._dynamic_config._runtime_pkg_mask:
3701 backtrack_reasons = \
3702 self._dynamic_config._runtime_pkg_mask[pkg]
3703 mreasons.append('backtracking: %s' % \
3704 ', '.join(sorted(backtrack_reasons)))
3705 backtrack_mask = True
3706 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3707 modified_use=self._pkg_use_enabled(pkg)):
3708 mreasons = ["exclude option"]
3710 masked_pkg_instances.add(pkg)
3711 if atom.unevaluated_atom.use:
3713 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3714 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3715 missing_use.append(pkg)
3716 if atom_set_with_use.findAtomForPackage(pkg):
3717 autounmask_broke_use_dep = True
3721 writemsg("violated_conditionals raised " + \
3722 "InvalidAtom: '%s' parent: %s" % \
3723 (atom, myparent), noiselevel=-1)
3725 if not mreasons and \
3727 pkg._metadata.get("REQUIRED_USE") and \
3728 eapi_has_required_use(pkg.eapi):
3729 if not check_required_use(
3730 pkg._metadata["REQUIRED_USE"],
3731 self._pkg_use_enabled(pkg),
3732 pkg.iuse.is_valid_flag,
3734 required_use_unsatisfied.append(pkg)
3736 root_slot = (pkg.root, pkg.slot_atom)
3737 if pkg.built and root_slot in self._rebuild.rebuild_list:
3738 mreasons = ["need to rebuild from source"]
3739 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3740 mreasons = ["need to rebuild from source"]
3741 elif pkg.built and not mreasons:
3742 mreasons = ["use flag configuration mismatch"]
3743 masked_packages.append(
3744 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3748 raise self._backtrack_mask()
3752 if check_autounmask_breakage:
3753 if autounmask_broke_use_dep:
3754 raise self._autounmask_breakage()
3758 missing_use_reasons = []
3759 missing_iuse_reasons = []
3760 for pkg in missing_use:
3761 use = self._pkg_use_enabled(pkg)
3763 #Use the unevaluated atom here, because some flags might have gone
3764 #lost during evaluation.
3765 required_flags = atom.unevaluated_atom.use.required
3766 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3770 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3771 missing_iuse_reasons.append((pkg, mreasons))
3773 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3774 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3776 untouchable_flags = \
3777 frozenset(chain(pkg.use.mask, pkg.use.force))
3778 if any(x in untouchable_flags for x in
3779 chain(need_enable, need_disable)):
3782 missing_use_adjustable.add(pkg)
3783 required_use = pkg._metadata.get("REQUIRED_USE")
3784 required_use_warning = ""
3786 old_use = self._pkg_use_enabled(pkg)
3787 new_use = set(self._pkg_use_enabled(pkg))
3788 for flag in need_enable:
3790 for flag in need_disable:
3791 new_use.discard(flag)
3792 if check_required_use(required_use, old_use,
3793 pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
3794 and not check_required_use(required_use, new_use,
3795 pkg.iuse.is_valid_flag, eapi=pkg.eapi):
3796 required_use_warning = ", this change violates use flag constraints " + \
3797 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3799 if need_enable or need_disable:
3801 changes.extend(colorize("red", "+" + x) \
3802 for x in need_enable)
3803 changes.extend(colorize("blue", "-" + x) \
3804 for x in need_disable)
3805 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3806 missing_use_reasons.append((pkg, mreasons))
3808 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3809 # Lets see if the violated use deps are conditional.
3810 # If so, suggest to change them on the parent.
3812 # If the child package is masked then a change to
3813 # parent USE is not a valid solution (a normal mask
3814 # message should be displayed instead).
3815 if pkg in masked_pkg_instances:
3819 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3820 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3821 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3822 #all violated use deps are conditional
3824 conditional = violated_atom.use.conditional
3825 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3826 conditional.enabled, conditional.disabled))
3828 untouchable_flags = \
3829 frozenset(chain(myparent.use.mask, myparent.use.force))
3830 if any(x in untouchable_flags for x in involved_flags):
3833 required_use = myparent._metadata.get("REQUIRED_USE")
3834 required_use_warning = ""
3836 old_use = self._pkg_use_enabled(myparent)
3837 new_use = set(self._pkg_use_enabled(myparent))
3838 for flag in involved_flags:
3840 new_use.discard(flag)
3843 if check_required_use(required_use, old_use,
3844 myparent.iuse.is_valid_flag,
3845 eapi=myparent.eapi) and \
3846 not check_required_use(required_use, new_use,
3847 myparent.iuse.is_valid_flag,
3848 eapi=myparent.eapi):
3849 required_use_warning = ", this change violates use flag constraints " + \
3850 "defined by %s: '%s'" % (myparent.cpv, \
3851 human_readable_required_use(required_use))
3853 for flag in involved_flags:
3854 if flag in self._pkg_use_enabled(myparent):
3855 changes.append(colorize("blue", "-" + flag))
3857 changes.append(colorize("red", "+" + flag))
3858 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3859 if (myparent, mreasons) not in missing_use_reasons:
3860 missing_use_reasons.append((myparent, mreasons))
3862 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3863 in missing_use_reasons if pkg not in masked_pkg_instances]
3865 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3866 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3868 show_missing_use = False
3869 if unmasked_use_reasons:
3870 # Only show the latest version.
3871 show_missing_use = []
3873 parent_reason = None
3874 for pkg, mreasons in unmasked_use_reasons:
3876 if parent_reason is None:
3877 #This happens if a use change on the parent
3878 #leads to a satisfied conditional use dep.
3879 parent_reason = (pkg, mreasons)
3880 elif pkg_reason is None:
3881 #Don't rely on the first pkg in unmasked_use_reasons,
3882 #being the highest version of the dependency.
3883 pkg_reason = (pkg, mreasons)
3885 show_missing_use.append(pkg_reason)
3887 show_missing_use.append(parent_reason)
3889 elif unmasked_iuse_reasons:
3890 masked_with_iuse = False
3891 for pkg in masked_pkg_instances:
3892 #Use atom.unevaluated here, because some flags might have gone
3893 #lost during evaluation.
3894 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3895 # Package(s) with required IUSE are masked,
3896 # so display a normal masking message.
3897 masked_with_iuse = True
3899 if not masked_with_iuse:
3900 show_missing_use = unmasked_iuse_reasons
3902 if required_use_unsatisfied:
3903 # If there's a higher unmasked version in missing_use_adjustable
3904 # then we want to show that instead.
3905 for pkg in missing_use_adjustable:
3906 if pkg not in masked_pkg_instances and \
3907 pkg > required_use_unsatisfied[0]:
3908 required_use_unsatisfied = False
3913 if show_req_use is None and required_use_unsatisfied:
3914 # We have an unmasked package that only requires USE adjustment
3915 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3916 # that the user wants the latest version, so only the first
3917 # instance is displayed.
3918 show_req_use = required_use_unsatisfied[0]
3920 if show_req_use is not None:
3923 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3924 writemsg("\n!!! " + \
3925 colorize("BAD", "The ebuild selected to satisfy ") + \
3926 colorize("INFORM", xinfo) + \
3927 colorize("BAD", " has unmet requirements.") + "\n",
3929 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3930 writemsg("- %s %s\n" % (output_cpv, use_display),
3932 writemsg("\n The following REQUIRED_USE flag constraints " + \
3933 "are unsatisfied:\n", noiselevel=-1)
3934 reduced_noise = check_required_use(
3935 pkg._metadata["REQUIRED_USE"],
3936 self._pkg_use_enabled(pkg),
3937 pkg.iuse.is_valid_flag,
3938 eapi=pkg.eapi).tounicode()
3939 writemsg(" %s\n" % \
3940 human_readable_required_use(reduced_noise),
3942 normalized_required_use = \
3943 " ".join(pkg._metadata["REQUIRED_USE"].split())
3944 if reduced_noise != normalized_required_use:
3945 writemsg("\n The above constraints " + \
3946 "are a subset of the following complete expression:\n",
3948 writemsg(" %s\n" % \
3949 human_readable_required_use(normalized_required_use),
3951 writemsg("\n", noiselevel=-1)
3953 elif show_missing_use:
3954 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3955 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3956 for pkg, mreasons in show_missing_use:
3957 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3959 elif masked_packages:
3960 writemsg("\n!!! " + \
3961 colorize("BAD", "All ebuilds that could satisfy ") + \
3962 colorize("INFORM", xinfo) + \
3963 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3964 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3965 have_eapi_mask = show_masked_packages(masked_packages)
3967 writemsg("\n", noiselevel=-1)
3968 msg = ("The current version of portage supports " + \
3969 "EAPI '%s'. You must upgrade to a newer version" + \
3970 " of portage before EAPI masked packages can" + \
3971 " be installed.") % portage.const.EAPI
3972 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3973 writemsg("\n", noiselevel=-1)
3977 if not atom.cp.startswith("null/"):
3978 for pkg in self._iter_match_pkgs_any(
3979 root_config, Atom(atom.cp)):
3983 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3984 if isinstance(myparent, AtomArg) and \
3986 self._frozen_config.myopts.get(
3987 "--misspell-suggestions", "y") != "n":
3989 writemsg("\nemerge: searching for similar names..."
3993 if "--usepkgonly" not in self._frozen_config.myopts:
3995 if "--usepkg" in self._frozen_config.myopts:
3998 matches = similar_name_search(dbs, atom)
4000 if len(matches) == 1:
4001 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
4003 elif len(matches) > 1:
4005 "\nemerge: Maybe you meant any of these: %s?\n" % \
4006 (", ".join(matches),), noiselevel=-1)
4008 # Generally, this would only happen if
4009 # all dbapis are empty.
4010 writemsg(" nothing similar found.\n"
4013 if not isinstance(myparent, AtomArg):
4014 # It's redundant to show parent for AtomArg since
4015 # it's the same as 'xinfo' displayed above.
4016 dep_chain = self._get_dep_chain(myparent, atom)
4017 for node, node_type in dep_chain:
4018 msg.append('(dependency required by "%s" [%s])' % \
4019 (colorize('INFORM', "%s" % (node)), node_type))
4022 writemsg("\n".join(msg), noiselevel=-1)
4023 writemsg("\n", noiselevel=-1)
4027 writemsg("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
	"""
	Yield Package instances matching the given atom from every
	filtered db (ebuild/binary/installed) configured for
	root_config's root, preserving db order.
	"""
	filtered_dbs = self._dynamic_config._filtered_trees[
		root_config.root]["dbs"]
	for db, pkg_type, built, installed, db_keys in filtered_dbs:
		for match in self._iter_match_pkgs(root_config,
			pkg_type, atom, onlydeps=onlydeps):
			yield match
def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
	"""
	Iterate over Package instances of pkg_type matching the given atom.
	This does not check visibility and it also does not match USE for
	unbuilt ebuilds since USE are lazily calculated after visibility
	checks (to avoid the expense when possible).

	NOTE(review): several lines are elided in this excerpt
	(try/else/continuation lines), so some branches below appear
	without their guarding statements.
	"""
	db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
	# Expand a possibly category-less atom against this db's contents.
	atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
	cp_list = db.cp_list(atom_exp.cp)
	matched_something = False
	installed = pkg_type == 'installed'

	# atom_set is used below for full metadata-aware matching
	# (continuation of this call is elided in this view).
	atom_set = InternalPackageSet(initial_atoms=(atom,),
	if atom.repo is None and hasattr(db, "getRepositories"):
		repo_list = db.getRepositories()
		# NOTE(review): the else: for the single-repo case is elided.
		repo_list = [atom.repo]

	# Call match_from_list on one cpv at a time, in order
	# to avoid unnecessary match_from_list comparisons on
	# versions that are never yielded from this method.
	if not match_from_list(atom_exp, [cpv]):
	for repo in repo_list:
		# Build the Package instance for this cpv/repo pair
		# (the enclosing try: line is elided in this view).
		pkg = self._pkg(cpv, pkg_type, root_config,
			installed=installed, onlydeps=onlydeps, myrepo=repo)
	except portage.exception.PackageNotFound:
		# A cpv can be returned from dbapi.match() as an
		# old-style virtual match even in cases when the
		# package does not actually PROVIDE the virtual.
		# Filter out any such false matches here.

		# Make sure that cpv from the current repo satisfies the atom.
		# This might not be the case if there are several repos with
		# the same cpv, but different metadata keys, like SLOT.
		# Also, parts of the match that require metadata access
		# are deferred until we have cached the metadata in a
		if not atom_set.findAtomForPackage(pkg,
			modified_use=self._pkg_use_enabled(pkg)):
		matched_something = True

	# USE=multislot can make an installed package appear as if
	# it doesn't satisfy a slot dependency. Rebuilding the ebuild
	# won't do any good as long as USE=multislot is enabled since
	# the newly built package still won't have the expected slot.
	# Therefore, assume that such SLOT dependencies are already
	# satisfied rather than forcing a rebuild.
	if not matched_something and installed and \
		atom.slot is not None and not atom.slot_operator_built:
		if "remove" in self._dynamic_config.myparams:
			# We need to search the portdbapi, which is not in our
			# normal dbs list, in order to find the real SLOT.
			portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
			db_keys = list(portdb._aux_cache_keys)
			dbs = [(portdb, "ebuild", False, False, db_keys)]
			# NOTE(review): the else: branch header is elided here.
			dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]

		cp_list = db.cp_list(atom_exp.cp)
			# Re-match with the slot stripped from the atom.
			atom_set = InternalPackageSet(
				initial_atoms=(atom.without_slot,), allow_repo=True)
			atom_exp_without_slot = atom_exp.without_slot
				if not match_from_list(atom_exp_without_slot, [cpv]):
				slot_available = False
				# Check whether any db actually provides the requested slot.
				for other_db, other_type, other_built, \
					other_installed, other_keys in dbs:
						if portage.dep._match_slot(atom,
							other_db._pkg_str(_unicode(cpv), None)):
							slot_available = True
					except (KeyError, InvalidData):
				if not slot_available:
					inst_pkg = self._pkg(cpv, "installed",
						root_config, installed=installed, myrepo=atom.repo)
					# Remove the slot from the atom and verify that
					# the package matches the resulting atom.
					if atom_set.findAtomForPackage(inst_pkg):
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
	"""
	Memoizing wrapper around _select_pkg_highest_available_imp.
	The cache key includes _autounmask because it changes which
	packages are considered acceptable.
	NOTE(review): cache-hit early return and result unpacking lines
	are elided in this excerpt.
	"""
	cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
	ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
	ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
	self._dynamic_config._highest_pkg_cache[cache_key] = ret
	# Record the selected package as visible (pkg comes from an
	# elided unpacking of ret — TODO confirm against full source).
	if self._pkg_visibility_check(pkg) and \
		not (pkg.installed and pkg.masks):
		self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
def _want_installed_pkg(self, pkg):
	"""
	Given an installed package returned from select_pkg, return
	True if the user has not explicitly requested for this package
	to be replaced (typically via an atom on the command line).

	NOTE(review): the return statements of each branch are elided
	in this excerpt.
	"""
	# Packages matched by --exclude are always kept as installed.
	if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
		modified_use=self._pkg_use_enabled(pkg)):
	# Check whether any argument atom forces a reinstall
	# (enclosing try: line elided in this view).
	for arg, atom in self._iter_atoms_for_pkg(pkg):
		if arg.force_reinstall:
	except InvalidDependString:
	# In selective mode, keeping the installed instance is the default.
	if "selective" in self._dynamic_config.myparams:
def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
	"""
	Return whether an ebuild equivalent to pkg (same cpv) is visible.
	NOTE(review): the try: line and the corresponding else:/return
	lines are elided in this excerpt.
	"""
	# Continuation of an elided self._pkg(...) call that looks up
	# the exact-version ebuild for pkg.
		pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
	except portage.exception.PackageNotFound:
		# No ebuild in pkg's own repo; fall back to scanning all
		# repos for any visible ebuild with the same cpv.
		pkg_eb_visible = False
		for pkg_eb in self._iter_match_pkgs(pkg.root_config,
			"ebuild", Atom("=%s" % (pkg.cpv,))):
			if self._pkg_visibility_check(pkg_eb, autounmask_level):
				pkg_eb_visible = True
		if not pkg_eb_visible:
		# The same-repo ebuild was found; check its visibility.
		if not self._pkg_visibility_check(pkg_eb, autounmask_level):
def _equiv_binary_installed(self, pkg):
	"""
	Return True if the installed instance of pkg.cpv has the same
	BUILD_TIME as pkg (i.e. the binary is equivalent to what is
	already installed).
	NOTE(review): the try: line and early returns are elided in
	this excerpt.
	"""
	build_time = pkg.build_time
		inst_pkg = self._pkg(pkg.cpv, "installed",
			pkg.root_config, installed=True)
	except PackageNotFound:
	return build_time == inst_pkg.build_time
class _AutounmaskLevel(object):
	"""
	Bundle of boolean knobs describing which kinds of masking the
	autounmask logic may currently override: USE changes, unstable
	(~arch) keywords, licenses, missing (**) keywords, and
	package.mask entries.
	"""

	__slots__ = ("allow_use_changes", "allow_unstable_keywords",
		"allow_license_changes", "allow_missing_keywords",
		"allow_unmasks")

	def __init__(self):
		# Every flag starts out False; callers flip them on
		# incrementally, least invasive changes first.
		for _flag in self.__slots__:
			setattr(self, _flag, False)
def _autounmask_levels(self):
	"""
	Iterate over the different allowed things to unmask.

	2. USE + ~arch + license
	3. USE + ~arch + license + missing keywords
	4. USE + ~arch + license + masks
	5. USE + ~arch + license + missing keywords + masks

	* Do least invasive changes first.
	* Try unmasking alone before unmasking + missing keywords
	  to avoid -9999 versions if possible

	NOTE(review): the early return, intermediate level-object
	re-creation and continue lines are elided in this excerpt.
	"""
	if self._dynamic_config._autounmask is not True:

	autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
	autounmask_level = self._AutounmaskLevel()

	# Level 1: USE changes only.
	autounmask_level.allow_use_changes = True
	yield autounmask_level

	# Level 2: additionally allow license changes.
	autounmask_level.allow_license_changes = True
	yield autounmask_level

	for only_use_changes in (False,):
		autounmask_level.allow_unstable_keywords = (not only_use_changes)
		autounmask_level.allow_license_changes = (not only_use_changes)

		# Walk the (missing_keyword, unmask) combinations in order of
		# increasing invasiveness.
		for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):

			if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):

			autounmask_level.allow_missing_keywords = missing_keyword
			autounmask_level.allow_unmasks = unmask

			yield autounmask_level
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
	"""
	Select the best available package for atom, retrying with
	progressively more invasive autounmask levels when the plain
	selection fails.
	NOTE(review): several guard lines (installed-package check,
	loop break, result unpacking) are elided in this excerpt.
	"""
	pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)

	# Remember the plain (no-autounmask) selection as a fallback.
	default_selection = (pkg, existing)

	if pkg is not None and \
		not self._want_installed_pkg(pkg):

	if self._dynamic_config._autounmask is True:
		for autounmask_level in self._autounmask_levels():
			# Retry selection with this level's permissions
			# (assignment of the result is elided in this view).
			self._wrapped_select_pkg_highest_available_imp(
				root, atom, onlydeps=onlydeps,
				autounmask_level=autounmask_level)

			if self._dynamic_config._need_restart:

		# This ensures that we can fall back to an installed package
		# that may have been rejected in the autounmask path above.
		return default_selection

	return pkg, existing
def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
	"""
	Decide whether pkg may be used, optionally recording autounmask
	changes (unstable keywords, package.mask removals, license
	acceptances) permitted by autounmask_level. Needed changes are
	stored in _dynamic_config and mirrored into
	_backtrack_infos["config"] for the backtracking machinery.
	NOTE(review): early-return lines and the "if hint is None:"
	guard are elided in this excerpt.
	"""
	if trust_graph and pkg in self._dynamic_config.digraph:
		# Sometimes we need to temporarily disable
		# dynamic_config._autounmask, but for overall
		# consistency in dependency resolution, in most
		# cases we want to treat packages in the graph
		# as though they are visible.

	if not self._dynamic_config._autounmask or autounmask_level is None:

	pkgsettings = self._frozen_config.pkgsettings[pkg.root]
	root_config = self._frozen_config.roots[pkg.root]
	mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))

	# Classify each masking reason by the kind of change that could lift it.
	masked_by_unstable_keywords = False
	masked_by_missing_keywords = False
	missing_licenses = None
	masked_by_something_else = False
	masked_by_p_mask = False

	for reason in mreasons:
		hint = reason.unmask_hint

			# No usable hint: this mask cannot be autounmasked.
			masked_by_something_else = True
		elif hint.key == "unstable keyword":
			masked_by_unstable_keywords = True
			if hint.value == "**":
				# "**" means the keyword is entirely missing.
				masked_by_missing_keywords = True
		elif hint.key == "p_mask":
			masked_by_p_mask = True
		elif hint.key == "license":
			missing_licenses = hint.value
			# Unknown hint key (elided else: branch).
			masked_by_something_else = True

	if masked_by_something_else:

	if pkg in self._dynamic_config._needed_unstable_keywords:
		#If the package is already keyworded, remove the mask.
		masked_by_unstable_keywords = False
		masked_by_missing_keywords = False

	if pkg in self._dynamic_config._needed_p_mask_changes:
		#If the package is already keyworded, remove the mask.
		masked_by_p_mask = False

	if missing_licenses:
		#If the needed licenses are already unmasked, remove the mask.
		missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))

	if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
		#Package has already been unmasked.

	if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
		(masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
		(masked_by_p_mask and not autounmask_level.allow_unmasks) or \
		(missing_licenses and not autounmask_level.allow_license_changes):
		#We are not allowed to do the needed changes.

	if masked_by_unstable_keywords:
		self._dynamic_config._needed_unstable_keywords.add(pkg)
		backtrack_infos = self._dynamic_config._backtrack_infos
		backtrack_infos.setdefault("config", {})
		backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
		backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)

	if masked_by_p_mask:
		self._dynamic_config._needed_p_mask_changes.add(pkg)
		backtrack_infos = self._dynamic_config._backtrack_infos
		backtrack_infos.setdefault("config", {})
		backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
		backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)

	if missing_licenses:
		self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
		backtrack_infos = self._dynamic_config._backtrack_infos
		backtrack_infos.setdefault("config", {})
		backtrack_infos["config"].setdefault("needed_license_changes", set())
		backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
def _pkg_use_enabled(self, pkg, target_use=None):
	"""
	If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
	If target_use is given, the need changes are computed to make the package useable.
	Example: target_use = { "foo": True, "bar": False }
	The flags target_use must be in the pkg's IUSE.

	NOTE(review): several branch headers (built-package early
	return, else: branches, continue/return lines) are elided in
	this excerpt.
	"""
		return pkg.use.enabled

	needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)

	if target_use is None:
		if needed_use_config_change is None:
			return pkg.use.enabled
			# Elided else: return the previously computed USE set.
			return needed_use_config_change[0]

	if needed_use_config_change is not None:
		# Start from the previously computed change set.
		old_use = needed_use_config_change[0]
		old_changes = needed_use_config_change[1]
		new_changes = old_changes.copy()
		# Elided else: start from pkg's current USE.
		old_use = pkg.use.enabled

	# Compute the flag flips needed to reach target_use; a
	# contradiction with an earlier requested flip aborts (elided
	# return lines).
	for flag, state in target_use.items():
		real_flag = pkg.iuse.get_real_flag(flag)
		if real_flag is None:
			# Triggered by use-dep defaults.
		if real_flag not in old_use:
			if new_changes.get(real_flag) == False:
			new_changes[real_flag] = True
		if real_flag in old_use:
			if new_changes.get(real_flag) == True:
			new_changes[real_flag] = False
	new_use.update(old_use.difference(target_use))

	def want_restart_for_use_change(pkg, new_use):
		# A restart is needed when the USE change alters pkg's
		# dependency strings or affects a parent's USE deps.
		if pkg not in self._dynamic_config.digraph.nodes:

		for key in Package._dep_keys + ("LICENSE",):
			dep = pkg._metadata[key]
			old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
			new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))

			if old_val != new_val:

		parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
		if not parent_atoms:

		new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
		for ppkg, atom in parent_atoms:
			if not atom.use or \
				not any(x in atom.use.required for x in changes):

	if new_changes != old_changes:
		#Don't do the change if it violates REQUIRED_USE.
		required_use = pkg._metadata.get("REQUIRED_USE")
		if required_use and check_required_use(required_use, old_use,
			pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
			not check_required_use(required_use, new_use,
			pkg.iuse.is_valid_flag, eapi=pkg.eapi):

		# Masked or forced flags cannot be changed by the user.
		if any(x in pkg.use.mask for x in new_changes) or \
			any(x in pkg.use.force for x in new_changes):

		# Record the change and mirror it for the backtracker.
		self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
		backtrack_infos = self._dynamic_config._backtrack_infos
		backtrack_infos.setdefault("config", {})
		backtrack_infos["config"].setdefault("needed_use_config_changes", [])
		backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
		if want_restart_for_use_change(pkg, new_use):
			self._dynamic_config._need_restart = True
def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
	"""
	Core package selection: scan every configured db (ebuild,
	binary, installed) for packages matching atom, filter by
	visibility, USE configuration, exclusion and rebuild lists,
	then choose the best candidate. Returns (package,
	existing_node).

	NOTE(review): many lines are elided in this excerpt (continue/
	break/else: lines, and bindings such as atom_cp), so some
	branches below appear without their guarding statements.
	"""
	root_config = self._frozen_config.roots[root]
	pkgsettings = self._frozen_config.pkgsettings[root]
	dbs = self._dynamic_config._filtered_trees[root]["dbs"]
	vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
	# List of acceptable packages, ordered by type preference.
	matched_packages = []
	matched_pkgs_ignore_use = []
	highest_version = None
	if not isinstance(atom, portage.dep.Atom):
		atom = portage.dep.Atom(atom)
	# assumes atom_cp is bound to atom.cp in an elided line — TODO confirm
	have_new_virt = atom_cp.startswith("virtual/") and \
		self._have_new_virt(root, atom_cp)
	atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
	existing_node = None
	# Cache frequently consulted option/param flags as locals.
	rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
	usepkg = "--usepkg" in self._frozen_config.myopts
	usepkgonly = "--usepkgonly" in self._frozen_config.myopts
	empty = "empty" in self._dynamic_config.myparams
	selective = "selective" in self._dynamic_config.myparams
	avoid_update = "--update" not in self._frozen_config.myopts
	dont_miss_updates = "--update" in self._frozen_config.myopts
	use_ebuild_visibility = self._frozen_config.myopts.get(
		'--use-ebuild-visibility', 'n') != 'n'
	reinstall_atoms = self._frozen_config.reinstall_atoms
	usepkg_exclude = self._frozen_config.usepkg_exclude
	useoldpkg_atoms = self._frozen_config.useoldpkg_atoms

	# Behavior of the "selective" parameter depends on
	# whether or not a package matches an argument atom.
	# If an installed package provides an old-style
	# virtual that is no longer provided by an available
	# package, the installed package may match an argument
	# atom even though none of the available packages do.
	# Therefore, "selective" logic does not consider
	# whether or not an installed package matches an
	# argument atom. It only considers whether or not
	# available packages match argument atoms, which is
	# represented by the found_available_arg flag.
	found_available_arg = False
	packages_with_invalid_use_config = []
	# First pass prefers slots already present in the graph;
	# second pass considers everything else.
	for find_existing_node in True, False:
		for db, pkg_type, built, installed, db_keys in dbs:
			if installed and not find_existing_node:
				want_reinstall = reinstall or empty or \
					(found_available_arg and not selective)
				if want_reinstall and matched_packages:
			# Ignore USE deps for the initial match since we want to
			# ensure that updates aren't missed solely due to the user's
			# USE configuration.
			for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
				if pkg.cp != atom_cp and have_new_virt:
					# pull in a new-style virtual instead
				if pkg in self._dynamic_config._runtime_pkg_mask:
					# The package has been masked by the backtracking logic
				root_slot = (pkg.root, pkg.slot_atom)
				if pkg.built and root_slot in self._rebuild.rebuild_list:
				if (pkg.installed and
					root_slot in self._rebuild.reinstall_list):

				# Skip packages excluded via --exclude.
				if not pkg.installed and \
					self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
						modified_use=self._pkg_use_enabled(pkg)):

				if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
					modified_use=self._pkg_use_enabled(pkg)):

				useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
					modified_use=self._pkg_use_enabled(pkg))

				if packages_with_invalid_use_config and (not built or not useoldpkg) and \
					(not pkg.installed or dont_miss_updates):
					# Check if a higher version was rejected due to user
					# USE configuration. The packages_with_invalid_use_config
					# list only contains unbuilt ebuilds since USE can't
					# be changed for built packages.
					higher_version_rejected = False
					repo_priority = pkg.repo_priority
					for rejected in packages_with_invalid_use_config:
						if rejected.cp != pkg.cp:
								higher_version_rejected = True
						if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
							# If version is identical then compare
							# repo priority (see bug #350254).
							rej_repo_priority = rejected.repo_priority
							if rej_repo_priority is not None and \
								(repo_priority is None or
								rej_repo_priority > repo_priority):
								higher_version_rejected = True
					if higher_version_rejected:

				reinstall_for_flags = None

				if not pkg.installed or \
					(matched_packages and not avoid_update):
					# Only enforce visibility on installed packages
					# if there is at least one other visible package
					# available. By filtering installed masked packages
					# here, packages that have been masked since they
					# were installed can be automatically downgraded
					# to an unmasked version. NOTE: This code needs to
					# be consistent with masking behavior inside
					# _dep_check_composite_db, in order to prevent
					# incorrect choices in || deps like bug #351828.
					if not self._pkg_visibility_check(pkg, autounmask_level):

				# Enable upgrade or downgrade to a version
				# with visible KEYWORDS when the installed
				# version is masked by KEYWORDS, but never
				# reinstall the same exact version only due
				# to a KEYWORDS mask. See bug #252167.

				if pkg.type_name != "ebuild" and matched_packages:
					# Don't re-install a binary package that is
					# identical to the currently installed package
					# (see bug #354441).
					identical_binary = False
					if usepkg and pkg.installed:
						for selected_pkg in matched_packages:
							if selected_pkg.type_name == "binary" and \
								selected_pkg.cpv == pkg.cpv and \
								selected_pkg.build_time == \
								identical_binary = True

					if not identical_binary:
						# If the ebuild no longer exists or it's
						# keywords have been dropped, reject built
						# instances (installed or binary).
						# If --usepkgonly is enabled, assume that
						# the ebuild status should be ignored.
						if not use_ebuild_visibility and (usepkgonly or useoldpkg):
							if pkg.installed and pkg.masks:
						elif not self._equiv_ebuild_visible(pkg,
							autounmask_level=autounmask_level):

				# Calculation of USE for unbuilt ebuilds is relatively
				# expensive, so it is only performed lazily, after the
				# above visibility checks are complete.

				# Scan argument atoms for forced reinstalls
				# (enclosing try: line elided in this view).
				for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
					if myarg.force_reinstall:
				except InvalidDependString:
					# masked by corruption
				if not installed and myarg:
					found_available_arg = True

				if atom.unevaluated_atom.use:
					#Make sure we don't miss a 'missing IUSE'.
					if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
						# Don't add this to packages_with_invalid_use_config
						# since IUSE cannot be adjusted by the user.

					matched_pkgs_ignore_use.append(pkg)
					if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
						# Ask for the USE changes the atom needs
						# (target_use initialization is elided).
						for flag in atom.use.enabled:
							target_use[flag] = True
						for flag in atom.use.disabled:
							target_use[flag] = False
						use = self._pkg_use_enabled(pkg, target_use)
						# Elided else: no autounmask USE changes allowed.
						use = self._pkg_use_enabled(pkg)

					can_adjust_use = not pkg.built
					is_valid_flag = pkg.iuse.is_valid_flag
					missing_enabled = frozenset(x for x in
						atom.use.missing_enabled if not is_valid_flag(x))
					missing_disabled = frozenset(x for x in
						atom.use.missing_disabled if not is_valid_flag(x))

					if atom.use.enabled:
						if any(x in atom.use.enabled for x in missing_disabled):
							can_adjust_use = False
						need_enabled = atom.use.enabled.difference(use)
							need_enabled = need_enabled.difference(missing_enabled)
								if any(x in pkg.use.mask for x in need_enabled):
									can_adjust_use = False

					if atom.use.disabled:
						if any(x in atom.use.disabled for x in missing_enabled):
							can_adjust_use = False
						need_disabled = atom.use.disabled.intersection(use)
							need_disabled = need_disabled.difference(missing_disabled)
								if any(x in pkg.use.force and x not in
									pkg.use.mask for x in need_disabled):
									can_adjust_use = False

						# Above we must ensure that this package has
						# absolutely no use.force, use.mask, or IUSE
						# issues that the user typically can't make
						# adjustments to solve (see bug #345979).
						# FIXME: Conditional USE deps complicate
						# issues. This code currently excludes cases
						# in which the user can adjust the parent
						# package's USE in order to satisfy the dep.
							packages_with_invalid_use_config.append(pkg)

				if pkg.cp == atom_cp:
					if highest_version is None:
						highest_version = pkg
					elif pkg > highest_version:
						highest_version = pkg
				# At this point, we've found the highest visible
				# match from the current repo. Any lower versions
				# from this repo are ignored, so this so the loop
				# will always end with a break statement below
				if find_existing_node:
					e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
					# Use PackageSet.findAtomForPackage()
					# for PROVIDE support.
					if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
						if highest_version and \
							e_pkg.cp == atom_cp and \
							e_pkg < highest_version and \
							e_pkg.slot_atom != highest_version.slot_atom:
							# There is a higher version available in a
							# different slot, so this existing node is
						matched_packages.append(e_pkg)
						existing_node = e_pkg
				# Compare built package to current config and
				# reject the built package if necessary.
				if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
					("--newuse" in self._frozen_config.myopts or \
					"--reinstall" in self._frozen_config.myopts or \
					(not installed and self._dynamic_config.myparams.get(
					"binpkg_respect_use") in ("y", "auto"))):
					iuses = pkg.iuse.all
					old_use = self._pkg_use_enabled(pkg)
						pkgsettings.setcpv(myeb)
						# Elided else: fall back to the built package itself.
						pkgsettings.setcpv(pkg)
					now_use = pkgsettings["PORTAGE_USE"].split()
					forced_flags = set()
					forced_flags.update(pkgsettings.useforce)
					forced_flags.update(pkgsettings.usemask)
					if myeb and not usepkgonly and not useoldpkg:
						cur_iuse = myeb.iuse.all
					reinstall_for_flags = self._reinstall_for_flags(pkg,
						forced_flags, old_use, iuses, now_use, cur_iuse)
					if reinstall_for_flags:
						if not pkg.installed:
							self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
				# Compare current config to installed package
				# and do not reinstall if possible.
				if not installed and not useoldpkg and \
					("--newuse" in self._frozen_config.myopts or \
					"--reinstall" in self._frozen_config.myopts) and \
					cpv in vardb.match(atom):
					forced_flags = set()
					forced_flags.update(pkg.use.force)
					forced_flags.update(pkg.use.mask)
					inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
					old_use = inst_pkg.use.enabled
					old_iuse = inst_pkg.iuse.all
					cur_use = self._pkg_use_enabled(pkg)
					cur_iuse = pkg.iuse.all
					reinstall_for_flags = \
						self._reinstall_for_flags(pkg,
						forced_flags, old_use, old_iuse,
				if reinstall_for_flags:
				if reinstall_atoms.findAtomForPackage(pkg, \
					modified_use=self._pkg_use_enabled(pkg)):
				matched_oldpkg.append(pkg)
				matched_packages.append(pkg)
				if reinstall_for_flags:
					self._dynamic_config._reinstall_nodes[pkg] = \

	if not matched_packages:

	if "--debug" in self._frozen_config.myopts:
		for pkg in matched_packages:
			portage.writemsg("%s %s%s%s\n" % \
				((pkg.type_name + ":").rjust(10),
				pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)

	# Filter out any old-style virtual matches if they are
	# mixed with new-style virtual matches.
	if len(matched_packages) > 1 and \
		"virtual" == portage.catsplit(cp)[0]:
		for pkg in matched_packages:
				# Got a new-style virtual, so filter
				# out any old-style virtuals.
				matched_packages = [pkg for pkg in matched_packages \

	if existing_node is not None and \
		existing_node in matched_packages:
		return existing_node, existing_node

	if len(matched_packages) > 1:
		if rebuilt_binaries:
			for pkg in matched_packages:
					if unbuilt_pkg is None or pkg > unbuilt_pkg:
			if built_pkg is not None and inst_pkg is not None:
				# Only reinstall if binary package BUILD_TIME is
				# non-empty, in order to avoid cases like to
				# bug #306659 where BUILD_TIME fields are missing
				# in local and/or remote Packages file.
				built_timestamp = built_pkg.build_time
				installed_timestamp = inst_pkg.build_time

				if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
				elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
					minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
					if built_timestamp and \
						built_timestamp > installed_timestamp and \
						built_timestamp >= minimal_timestamp:
						return built_pkg, existing_node

					#Don't care if the binary has an older BUILD_TIME than the installed
					#package. This is for closely tracking a binhost.
					#Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
					if built_timestamp and \
						built_timestamp != installed_timestamp:
						return built_pkg, existing_node

		# Drop invalid installed instances when alternatives exist.
		for pkg in matched_packages:
			if pkg.installed and pkg.invalid:
				matched_packages = [x for x in \
					matched_packages if x is not pkg]

			for pkg in matched_packages:
				if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
					return pkg, existing_node

		visible_matches = []
			visible_matches = [pkg.cpv for pkg in matched_oldpkg \
				if self._pkg_visibility_check(pkg, autounmask_level)]
		if not visible_matches:
			visible_matches = [pkg.cpv for pkg in matched_packages \
				if self._pkg_visibility_check(pkg, autounmask_level)]
			bestmatch = portage.best(visible_matches)
			# all are masked, so ignore visibility
			bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
		matched_packages = [pkg for pkg in matched_packages \
			if portage.dep.cpvequal(pkg.cpv, bestmatch)]

	# ordered by type preference ("ebuild" type is the last resort)
	return matched_packages[-1], existing_node
def _select_pkg_from_graph(self, root, atom, onlydeps=False):
	"""
	Select packages that have already been added to the graph or
	those that are installed and have not been scheduled for

	NOTE(review): the docstring terminator and the empty-match
	early return are elided in this excerpt.
	"""
	graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
	matches = graph_db.match_pkgs(atom)
	pkg = matches[-1] # highest match
	in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
	return pkg, in_graph
def _select_pkg_from_installed(self, root, atom, onlydeps=False):
	"""
	Select packages that are installed.

	NOTE(review): continuation and "if unmasked:" guard lines are
	elided in this excerpt.
	"""
	matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
	if len(matches) > 1:
		matches.reverse() # ascending order
		# Prefer installed instances that pass the normal
		# visibility check.
		unmasked = [pkg for pkg in matches if \
			self._pkg_visibility_check(pkg)]
			if len(unmasked) == 1:
				# Account for packages with masks (like KEYWORDS masks)
				# that are usually ignored in visibility checks for
				# installed packages, in order to handle cases like
				unmasked = [pkg for pkg in matches if not pkg.masks]
				if len(matches) > 1:
					# Now account for packages for which existing
					# ebuilds are masked or unavailable (bug #445506).
					unmasked = [pkg for pkg in matches if
						self._equiv_ebuild_visible(pkg)]
	pkg = matches[-1] # highest match
	in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
	return pkg, in_graph
4963 def _complete_graph(self, required_sets=None):
# NOTE(review): this numbered listing elides many original lines (early
# returns, `continue`/`break` statements, blank lines); the visible code
# below is only a partial view of the method.
4965 Add any deep dependencies of required sets (args, system, world) that
4966 have not been pulled into the graph yet. This ensures that the graph
4967 is consistent such that initially satisfied deep dependencies are not
4968 broken in the new graph. Initially unsatisfied dependencies are
4969 irrelevant since we only want to avoid breaking dependencies that are
4970 initially satisfied.
4972 Since this method can consume enough time to disturb users, it is
4973 currently only enabled by the --complete-graph option.
4975 @param required_sets: contains required sets (currently only used
4976 for depclean and prune removal operations)
4977 @type required_sets: dict
# Fast exit: completion is pointless for --buildpkgonly and when
# recursion is disabled.
4979 if "--buildpkgonly" in self._frozen_config.myopts or \
4980 "recurse" not in self._dynamic_config.myparams:
# Policy knobs (all default "y"): trigger complete mode when an installed
# package's USE, version, or slot/sub-slot would change.
4983 complete_if_new_use = self._dynamic_config.myparams.get(
4984 "complete_if_new_use", "y") == "y"
4985 complete_if_new_ver = self._dynamic_config.myparams.get(
4986 "complete_if_new_ver", "y") == "y"
4987 rebuild_if_new_slot = self._dynamic_config.myparams.get(
4988 "rebuild_if_new_slot", "y") == "y"
4989 complete_if_new_slot = rebuild_if_new_slot
4991 if "complete" not in self._dynamic_config.myparams and \
4992 (complete_if_new_use or
4993 complete_if_new_ver or complete_if_new_slot):
4994 # Enable complete mode if an installed package will change somehow.
4996 version_change = False
4997 for node in self._dynamic_config.digraph:
4998 if not isinstance(node, Package) or \
4999 node.operation != "merge":
5001 vardb = self._frozen_config.roots[
5002 node.root].trees["vartree"].dbapi
5004 if complete_if_new_use or complete_if_new_ver:
5005 inst_pkg = vardb.match_pkgs(node.slot_atom)
5006 if inst_pkg and inst_pkg[0].cp == node.cp:
5007 inst_pkg = inst_pkg[0]
5008 if complete_if_new_ver:
# Version differs in either direction (upgrade or downgrade).
5009 if inst_pkg < node or node < inst_pkg:
5010 version_change = True
5012 elif not (inst_pkg.slot == node.slot and
5013 inst_pkg.sub_slot == node.sub_slot):
5014 # slot/sub-slot change without revbump gets
5015 # similar treatment to a version change
5016 version_change = True
5019 # Intersect enabled USE with IUSE, in order to
5020 # ignore forced USE from implicit IUSE flags, since
5021 # they're probably irrelevant and they are sensitive
5022 # to use.mask/force changes in the profile.
5023 if complete_if_new_use and \
5024 (node.iuse.all != inst_pkg.iuse.all or
5025 self._pkg_use_enabled(node).intersection(node.iuse.all) !=
5026 self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
5030 if complete_if_new_slot:
5031 cp_list = vardb.match_pkgs(Atom(node.cp))
# Same cp installed, but no installed instance shares the
# merged node's slot/sub-slot: treat like a version change.
5032 if (cp_list and cp_list[0].cp == node.cp and
5033 not any(node.slot == pkg.slot and
5034 node.sub_slot == pkg.sub_slot for pkg in cp_list)):
5035 version_change = True
5038 if use_change or version_change:
5039 self._dynamic_config.myparams["complete"] = True
5041 if "complete" not in self._dynamic_config.myparams:
5046 # Put the depgraph into a mode that causes it to only
5047 # select packages that have already been added to the
5048 # graph or those that are installed and have not been
5049 # scheduled for replacement. Also, toggle the "deep"
5050 # parameter so that all dependencies are traversed and
5052 self._dynamic_config._complete_mode = True
5053 self._select_atoms = self._select_atoms_from_graph
5054 if "remove" in self._dynamic_config.myparams:
5055 self._select_package = self._select_pkg_from_installed
5057 self._select_package = self._select_pkg_from_graph
5058 self._dynamic_config._traverse_ignored_deps = True
5059 already_deep = self._dynamic_config.myparams.get("deep") is True
5060 if not already_deep:
5061 self._dynamic_config.myparams["deep"] = True
5063 # Invalidate the package selection cache, since
5064 # _select_package has just changed implementations.
5065 for trees in self._dynamic_config._filtered_trees.values():
5066 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets (system/world/...) as arguments so their
# deep dependencies get traversed under the new selection policy.
5068 args = self._dynamic_config._initial_arg_list[:]
5069 for root in self._frozen_config.roots:
5070 if root != self._frozen_config.target_root and \
5071 ("remove" in self._dynamic_config.myparams or
5072 self._frozen_config.myopts.get("--root-deps") is not None):
5073 # Only pull in deps for the relevant root.
5075 depgraph_sets = self._dynamic_config.sets[root]
5076 required_set_names = self._frozen_config._required_set_names.copy()
5077 remaining_args = required_set_names.copy()
5078 if required_sets is None or root not in required_sets:
5081 # Removal actions may override sets with temporary
5082 # replacements that have had atoms removed in order
5083 # to implement --deselect behavior.
5084 required_set_names = set(required_sets[root])
5085 depgraph_sets.sets.clear()
5086 depgraph_sets.sets.update(required_sets[root])
5087 if "remove" not in self._dynamic_config.myparams and \
5088 root == self._frozen_config.target_root and \
5090 remaining_args.difference_update(depgraph_sets.sets)
5091 if not remaining_args and \
5092 not self._dynamic_config._ignored_deps and \
5093 not self._dynamic_config._dep_stack:
5095 root_config = self._frozen_config.roots[root]
5096 for s in required_set_names:
5097 pset = depgraph_sets.sets.get(s)
5099 pset = root_config.sets[s]
5100 atom = SETPREFIX + s
5101 args.append(SetArg(arg=atom, pset=pset,
5102 root_config=root_config))
5104 self._set_args(args)
5105 for arg in self._expand_set_args(args, add_to_digraph=True):
5106 for atom in arg.pset.getAtoms():
5107 self._dynamic_config._dep_stack.append(
5108 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps are now traversed too.
5112 if self._dynamic_config._ignored_deps:
5113 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
5114 self._dynamic_config._ignored_deps = []
5115 if not self._create_graph(allow_unsatisfied=True):
5117 # Check the unsatisfied deps to see if any initially satisfied deps
5118 # will become unsatisfied due to an upgrade. Initially unsatisfied
5119 # deps are irrelevant since we only want to avoid breaking deps
5120 # that are initially satisfied.
5121 while self._dynamic_config._unsatisfied_deps:
5122 dep = self._dynamic_config._unsatisfied_deps.pop()
5123 vardb = self._frozen_config.roots[
5124 dep.root].trees["vartree"].dbapi
5125 matches = vardb.match_pkgs(dep.atom)
5127 self._dynamic_config._initially_unsatisfied_deps.append(dep)
5129 # An scheduled installation broke a deep dependency.
5130 # Add the installed package to the graph so that it
5131 # will be appropriately reported as a slot collision
5132 # (possibly solvable via backtracking).
5133 pkg = matches[-1] # highest match
5134 if not self._add_pkg(pkg, dep):
5136 if not self._create_graph(allow_unsatisfied=True):
5140 def _pkg(self, cpv, type_name, root_config, installed=False,
5141 onlydeps=False, myrepo = None):
# Package factory with caching: look up an existing Package instance by its
# hash key, otherwise build one from the dbapi metadata.
# NOTE(review): the listing elides lines here (the docstring close, the
# `if pkg is None:` guard before the dbapi lookup, the try/except around
# aux_get, and the final return) - confirm against the full source.
5143 Get a package instance from the cache, or create a new
5144 one if necessary. Raises PackageNotFound from aux_get if it
5145 failures for some reason (package does not exist or is
5149 # Ensure that we use the specially optimized RootConfig instance
5150 # that refers to FakeVartree instead of the real vartree.
5151 root_config = self._frozen_config.roots[root_config.root]
5152 pkg = self._frozen_config._pkg_cache.get(
5153 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5154 repo_name=myrepo, root_config=root_config,
5155 installed=installed, onlydeps=onlydeps))
5156 if pkg is None and onlydeps and not installed:
5157 # Maybe it already got pulled in as a "merge" node.
5158 pkg = self._dynamic_config.mydbapi[root_config.root].get(
5159 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5160 repo_name=myrepo, root_config=root_config,
5161 installed=installed, onlydeps=False))
# Cache miss: fetch metadata from the appropriate tree's dbapi.
5164 tree_type = self.pkg_tree_map[type_name]
5165 db = root_config.trees[tree_type].dbapi
5166 db_keys = list(self._frozen_config._trees_orig[root_config.root][
5167 tree_type].dbapi._aux_cache_keys)
5170 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
5172 raise portage.exception.PackageNotFound(cpv)
5174 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
5175 installed=installed, metadata=metadata, onlydeps=onlydeps,
5176 root_config=root_config, type_name=type_name)
5178 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest version per slot that is masked solely by LICENSE,
# so the user can later be advised to adjust ACCEPT_LICENSE.
5180 if not self._pkg_visibility_check(pkg) and \
5181 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
5182 slot_key = (pkg.root, pkg.slot_atom)
5183 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
5184 if other_pkg is None or pkg > other_pkg:
5185 self._frozen_config._highest_license_masked[slot_key] = pkg
5189 def _validate_blockers(self):
5190 """Remove any blockers from the digraph that do not match any of the
5191 packages within the graph. If necessary, create hard deps to ensure
5192 correct merge order such that mutually blocking packages are never
5193 installed simultaneously. Also add runtime blockers from all installed
5194 packages if any of them haven't been added already (bug 128809)."""
# NOTE(review): this numbered listing elides many original lines (loop
# headers over vardb packages, try/except frames, `continue` statements,
# the final return) - the code below is only the visible portion.
5196 if "--buildpkgonly" in self._frozen_config.myopts or \
5197 "--nodeps" in self._frozen_config.myopts:
5201 # Pull in blockers from all installed packages that haven't already
5202 # been pulled into the depgraph, in order to ensure that they are
5203 # respected (bug 128809). Due to the performance penalty that is
5204 # incurred by all the additional dep_check calls that are required,
5205 # blockers returned from dep_check are cached on disk by the
5206 # BlockerCache class.
5208 # For installed packages, always ignore blockers from DEPEND since
5209 # only runtime dependencies should be relevant for packages that
5210 # are already built.
5211 dep_keys = Package._runtime_keys
5212 for myroot in self._frozen_config.trees:
5214 if self._frozen_config.myopts.get("--root-deps") is not None and \
5215 myroot != self._frozen_config.target_root:
5218 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
5219 pkgsettings = self._frozen_config.pkgsettings[myroot]
5220 root_config = self._frozen_config.roots[myroot]
5221 final_db = self._dynamic_config.mydbapi[myroot]
# stale_cache tracks cache entries whose cpv is no longer installed;
# entries still present after the loop are purged below.
5223 blocker_cache = BlockerCache(myroot, vardb)
5224 stale_cache = set(blocker_cache)
5227 stale_cache.discard(cpv)
5228 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
5230 pkg in self._dynamic_config._traversed_pkg_deps
5232 # Check for masked installed packages. Only warn about
5233 # packages that are in the graph in order to avoid warning
5234 # about those that will be automatically uninstalled during
5235 # the merge process or by --depclean. Always warn about
5236 # packages masked by license, since the user likely wants
5237 # to adjust ACCEPT_LICENSE.
5239 if not self._pkg_visibility_check(pkg,
5240 trust_graph=False) and \
5241 (pkg_in_graph or 'LICENSE' in pkg.masks):
5242 self._dynamic_config._masked_installed.add(pkg)
5244 self._check_masks(pkg)
5246 blocker_atoms = None
5252 self._dynamic_config._blocker_parents.child_nodes(pkg))
5257 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
5261 # Select just the runtime blockers.
5262 blockers = [blocker for blocker in blockers \
5263 if blocker.priority.runtime or \
5264 blocker.priority.runtime_post]
5265 if blockers is not None:
5266 blockers = set(blocker.atom for blocker in blockers)
5268 # If this node has any blockers, create a "nomerge"
5269 # node for it so that they can be enforced.
5270 self._spinner_update()
# A cached entry is only valid if its counter matches the
# installed package's counter.
5271 blocker_data = blocker_cache.get(cpv)
5272 if blocker_data is not None and \
5273 blocker_data.counter != pkg.counter:
5276 # If blocker data from the graph is available, use
5277 # it to validate the cache and update the cache if
5279 if blocker_data is not None and \
5280 blockers is not None:
5281 if not blockers.symmetric_difference(
5282 blocker_data.atoms):
5286 if blocker_data is None and \
5287 blockers is not None:
5288 # Re-use the blockers from the graph.
5289 blocker_atoms = sorted(blockers)
5291 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
5292 blocker_cache[pkg.cpv] = blocker_data
5296 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
5298 # Use aux_get() to trigger FakeVartree global
5299 # updates on *DEPEND when appropriate.
5300 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5301 # It is crucial to pass in final_db here in order to
5302 # optimize dep_check calls by eliminating atoms via
5303 # dep_wordreduce and dep_eval calls.
5305 success, atoms = portage.dep_check(depstr,
5306 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
5307 trees=self._dynamic_config._graph_trees, myroot=myroot)
5310 except Exception as e:
5311 # This is helpful, for example, if a ValueError
5312 # is thrown from cpv_expand due to multiple
5313 # matches (this can happen if an atom lacks a
5315 show_invalid_depstring_notice(
5316 pkg, depstr, "%s" % (e,))
5320 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5321 if replacement_pkg and \
5322 replacement_pkg[0].operation == "merge":
5323 # This package is being replaced anyway, so
5324 # ignore invalid dependencies so as not to
5325 # annoy the user too much (otherwise they'd be
5326 # forced to manually unmerge it first).
5328 show_invalid_depstring_notice(pkg, depstr, atoms)
5330 blocker_atoms = [myatom for myatom in atoms \
5332 blocker_atoms.sort()
5333 blocker_cache[cpv] = \
5334 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
5337 for atom in blocker_atoms:
5338 blocker = Blocker(atom=atom,
5340 priority=self._priority(runtime=True),
5342 self._dynamic_config._blocker_parents.add(blocker, pkg)
5343 except portage.exception.InvalidAtom as e:
5344 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5345 show_invalid_depstring_notice(
5346 pkg, depstr, "Invalid Atom: %s" % (e,))
# Purge cache entries for packages no longer installed, then persist.
5348 for cpv in stale_cache:
5349 del blocker_cache[cpv]
5350 blocker_cache.flush()
5353 # Discard any "uninstall" tasks scheduled by previous calls
5354 # to this method, since those tasks may not make sense given
5355 # the current graph state.
5356 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
5357 if previous_uninstall_tasks:
5358 self._dynamic_config._blocker_uninstalls = digraph()
5359 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against the initial
# (installed) and final (post-merge) package databases.
5361 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
5362 self._spinner_update()
5363 root_config = self._frozen_config.roots[blocker.root]
5364 virtuals = root_config.settings.getvirtuals()
5365 myroot = blocker.root
5366 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
5367 final_db = self._dynamic_config.mydbapi[myroot]
5369 provider_virtual = False
5370 if blocker.cp in virtuals and \
5371 not self._have_new_virt(blocker.root, blocker.cp):
5372 provider_virtual = True
5374 # Use this to check PROVIDE for each matched package
5376 atom_set = InternalPackageSet(
5377 initial_atoms=[blocker.atom])
# For old-style virtuals, expand the blocker atom to one atom
# per provider package.
5379 if provider_virtual:
5381 for provider_entry in virtuals[blocker.cp]:
5382 atoms.append(Atom(blocker.atom.replace(
5383 blocker.cp, provider_entry.cp, 1)))
5385 atoms = [blocker.atom]
5387 blocked_initial = set()
5389 for pkg in initial_db.match_pkgs(atom):
5390 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5391 blocked_initial.add(pkg)
5393 blocked_final = set()
5395 for pkg in final_db.match_pkgs(atom):
5396 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5397 blocked_final.add(pkg)
5399 if not blocked_initial and not blocked_final:
5400 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
5401 self._dynamic_config._blocker_parents.remove(blocker)
5402 # Discard any parents that don't have any more blockers.
5403 for pkg in parent_pkgs:
5404 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
5405 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
5406 self._dynamic_config._blocker_parents.remove(pkg)
5408 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
5409 unresolved_blocks = False
5410 depends_on_order = set()
5411 for pkg in blocked_initial:
5412 if pkg.slot_atom == parent.slot_atom and \
5413 not blocker.atom.blocker.overlap.forbid:
5414 # New !!atom blockers do not allow temporary
5415 # simulaneous installation, so unlike !atom
5416 # blockers, !!atom blockers aren't ignored
5417 # when they match other packages occupying
5420 if parent.installed:
5421 # Two currently installed packages conflict with
5422 # eachother. Ignore this case since the damage
5423 # is already done and this would be likely to
5424 # confuse users if displayed like a normal blocker.
5427 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5429 if parent.operation == "merge":
5430 # Maybe the blocked package can be replaced or simply
5431 # unmerged to resolve this block.
5432 depends_on_order.add((pkg, parent))
5434 # None of the above blocker resolutions techniques apply,
5435 # so apparently this one is unresolvable.
5436 unresolved_blocks = True
5437 for pkg in blocked_final:
5438 if pkg.slot_atom == parent.slot_atom and \
5439 not blocker.atom.blocker.overlap.forbid:
5440 # New !!atom blockers do not allow temporary
5441 # simulaneous installation, so unlike !atom
5442 # blockers, !!atom blockers aren't ignored
5443 # when they match other packages occupying
5446 if parent.operation == "nomerge" and \
5447 pkg.operation == "nomerge":
5448 # This blocker will be handled the next time that a
5449 # merge of either package is triggered.
5452 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5454 # Maybe the blocking package can be
5455 # unmerged to resolve this block.
5456 if parent.operation == "merge" and pkg.installed:
5457 depends_on_order.add((pkg, parent))
5459 elif parent.operation == "nomerge":
5460 depends_on_order.add((parent, pkg))
5462 # None of the above blocker resolutions techniques apply,
5463 # so apparently this one is unresolvable.
5464 unresolved_blocks = True
5466 # Make sure we don't unmerge any package that have been pulled
5468 if not unresolved_blocks and depends_on_order:
5469 for inst_pkg, inst_task in depends_on_order:
5470 if self._dynamic_config.digraph.contains(inst_pkg) and \
5471 self._dynamic_config.digraph.parent_nodes(inst_pkg):
5472 unresolved_blocks = True
5475 if not unresolved_blocks and depends_on_order:
5476 for inst_pkg, inst_task in depends_on_order:
# Build an explicit "uninstall" task mirroring the
# installed package's identity.
5477 uninst_task = Package(built=inst_pkg.built,
5478 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5479 metadata=inst_pkg._metadata,
5480 operation="uninstall",
5481 root_config=inst_pkg.root_config,
5482 type_name=inst_pkg.type_name)
5483 # Enforce correct merge order with a hard dep.
5484 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
5485 priority=BlockerDepPriority.instance)
5486 # Count references to this blocker so that it can be
5487 # invalidated after nodes referencing it have been
5489 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
5490 if not unresolved_blocks and not depends_on_order:
5491 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
5492 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
5493 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
5494 self._dynamic_config._blocker_parents.remove(blocker)
5495 if not self._dynamic_config._blocker_parents.child_nodes(parent):
5496 self._dynamic_config._blocker_parents.remove(parent)
5497 if unresolved_blocks:
5498 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
5502 def _accept_blocker_conflicts(self):
# Returns whether blocker conflicts may be tolerated: true when any of the
# listed options is present, since none of them actually merges onto the
# live system.
# NOTE(review): the listing elides the accumulator initialization and the
# return statement of this method.
5504 for x in ("--buildpkgonly", "--fetchonly",
5505 "--fetch-all-uri", "--nodeps"):
5506 if x in self._frozen_config.myopts:
5511 def _merge_order_bias(self, mygraph):
# NOTE(review): the listing elides the comparator's return statements and
# the node_info initialization; only the structure is visible here.
5513 For optimal leaf node selection, promote deep system runtime deps and
5514 order nodes from highest to lowest overall reference count.
# Pre-compute each node's parent (reference) count once.
5518 for node in mygraph.order:
5519 node_info[node] = len(mygraph.parent_nodes(node))
5520 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Comparator: uninstall tasks first, then deep system deps, then by
# descending reference count.
5522 def cmp_merge_preference(node1, node2):
5524 if node1.operation == 'uninstall':
5525 if node2.operation == 'uninstall':
5529 if node2.operation == 'uninstall':
5530 if node1.operation == 'uninstall':
5534 node1_sys = node1 in deep_system_deps
5535 node2_sys = node2 in deep_system_deps
5536 if node1_sys != node2_sys:
5541 return node_info[node2] - node_info[node1]
# In-place sort using the old-style comparator via cmp_sort_key.
5543 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# NOTE(review): the `reversed` parameter shadows the builtin; it is part of
# the public interface, so it is left unchanged here.
5545 def altlist(self, reversed=False):
# Lazily compute and cache the serialized merge list, retrying while
# _serialize_tasks signals a retry; returns a copy of the cached list.
# NOTE(review): the listing elides the try frame around _serialize_tasks
# and the final return/reversal handling.
5547 while self._dynamic_config._serialized_tasks_cache is None:
5548 self._resolve_conflicts()
5550 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
5551 self._serialize_tasks()
5552 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
5555 retlist = self._dynamic_config._serialized_tasks_cache[:]
5560 def _implicit_libc_deps(self, mergelist, graph):
5562 Create implicit dependencies on libc, in order to ensure that libc
5563 is installed as early as possible (see bug #303567).
# NOTE(review): the listing elides the libc_pkgs initialization, an early
# return when no libc packages are scheduled, and loop continue/break
# statements.
5566 implicit_libc_roots = (self._frozen_config._running_root.root,)
5567 for root in implicit_libc_roots:
5568 graphdb = self._dynamic_config.mydbapi[root]
5569 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5570 for atom in self._expand_virt_from_graph(root,
5571 portage.const.LIBC_PACKAGE_ATOM):
5574 match = graphdb.match_pkgs(atom)
# Only count libc packages that are newly merged (not already
# present in the installed-package database).
5578 if pkg.operation == "merge" and \
5579 not vardb.cpv_exists(pkg.cpv):
5580 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; any package merged before its root's
# libc gets a buildtime dependency edge on that libc package.
5585 earlier_libc_pkgs = set()
5587 for pkg in mergelist:
5588 if not isinstance(pkg, Package):
5589 # a satisfied blocker
5591 root_libc_pkgs = libc_pkgs.get(pkg.root)
5592 if root_libc_pkgs is not None and \
5593 pkg.operation == "merge":
5594 if pkg in root_libc_pkgs:
5595 earlier_libc_pkgs.add(pkg)
5597 for libc_pkg in root_libc_pkgs:
5598 if libc_pkg in earlier_libc_pkgs:
5599 graph.add(libc_pkg, pkg,
5600 priority=DepPriority(buildtime=True))
5602 def schedulerGraph(self):
5604 The scheduler graph is identical to the normal one except that
5605 uninstall edges are reversed in specific cases that require
5606 conflicting packages to be temporarily installed simultaneously.
5607 This is intended for use by the Scheduler in it's parallelization
5608 logic. It ensures that temporary simultaneous installation of
5609 conflicting packages is avoided when appropriate (especially for
5610 !!atom blockers), but allowed in specific cases that require it.
5612 Note that this method calls break_refs() which alters the state of
5613 internal Package instances such that this depgraph instance should
5614 not be used to perform any more calculations.
5617 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
5618 mergelist = self.altlist()
5619 self._implicit_libc_deps(mergelist,
5620 self._dynamic_config._scheduler_graph)
5622 # Break DepPriority.satisfied attributes which reference
5623 # installed Package instances.
5624 for parents, children, node in \
5625 self._dynamic_config._scheduler_graph.nodes.values():
5626 for priorities in chain(parents.values(), children.values()):
5627 for priority in priorities:
5628 if priority.satisfied:
# Replace the Package reference with plain True: truthiness
# is preserved while the reference is dropped.
5629 priority.satisfied = True
# Prune the package cache down to packages reachable from the graph or
# still installed, so unreferenced Package instances can be collected.
5631 pkg_cache = self._frozen_config._pkg_cache
5632 graph = self._dynamic_config._scheduler_graph
5633 trees = self._frozen_config.trees
5634 pruned_pkg_cache = {}
5635 for key, pkg in pkg_cache.items():
5636 if pkg in graph or \
5637 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
5638 pruned_pkg_cache[key] = pkg
5641 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
5645 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
5649 def break_refs(self):
5651 Break any references in Package instances that lead back to the depgraph.
5652 This is useful if you want to hold references to packages without also
5653 holding the depgraph on the heap. It should only be called after the
5654 depgraph and _frozen_config will not be used for any more calculations.
# Copy state from each optimized RootConfig back into the original
# RootConfig, then make the originals point at the depgraph's instances.
5656 for root_config in self._frozen_config.roots.values():
5657 root_config.update(self._frozen_config._trees_orig[
5658 root_config.root]["root_config"])
5659 # Both instances are now identical, so discard the
5660 # original which should have no other references.
5661 self._frozen_config._trees_orig[
5662 root_config.root]["root_config"] = root_config
5664 def _resolve_conflicts(self):
# Resolve slot collisions and blockers before task serialization.
# Escalate to "complete" mode when backtracking is allowed, slot
# collisions exist, and blocker conflicts are not being tolerated.
5666 if "complete" not in self._dynamic_config.myparams and \
5667 self._dynamic_config._allow_backtracking and \
5668 self._dynamic_config._slot_collision_nodes and \
5669 not self._accept_blocker_conflicts():
5670 self._dynamic_config.myparams["complete"] = True
5672 if not self._complete_graph():
5673 raise self._unknown_internal_error()
5675 self._process_slot_conflicts()
5677 self._slot_operator_trigger_reinstalls()
# A blocker-validation failure here is unrecoverable: skip restart and
# surface an internal error.
5679 if not self._validate_blockers():
5680 self._dynamic_config._skip_restart = True
5681 raise self._unknown_internal_error()
5683 def _serialize_tasks(self):
5685 debug = "--debug" in self._frozen_config.myopts
5688 writemsg("\ndigraph:\n\n", noiselevel=-1)
5689 self._dynamic_config.digraph.debug_print()
5690 writemsg("\n", noiselevel=-1)
5692 scheduler_graph = self._dynamic_config.digraph.copy()
5694 if '--nodeps' in self._frozen_config.myopts:
5695 # Preserve the package order given on the command line.
5696 return ([node for node in scheduler_graph \
5697 if isinstance(node, Package) \
5698 and node.operation == 'merge'], scheduler_graph)
5700 mygraph=self._dynamic_config.digraph.copy()
5702 removed_nodes = set()
5704 # Prune off all DependencyArg instances since they aren't
5705 # needed, and because of nested sets this is faster than doing
5706 # it with multiple digraph.root_nodes() calls below. This also
5707 # takes care of nested sets that have circular references,
5708 # which wouldn't be matched by digraph.root_nodes().
5709 for node in mygraph:
5710 if isinstance(node, DependencyArg):
5711 removed_nodes.add(node)
5713 mygraph.difference_update(removed_nodes)
5714 removed_nodes.clear()
5716 # Prune "nomerge" root nodes if nothing depends on them, since
5717 # otherwise they slow down merge order calculation. Don't remove
5718 # non-root nodes since they help optimize merge order in some cases
5719 # such as revdep-rebuild.
5722 for node in mygraph.root_nodes():
5723 if not isinstance(node, Package) or \
5724 node.installed or node.onlydeps:
5725 removed_nodes.add(node)
5727 self._spinner_update()
5728 mygraph.difference_update(removed_nodes)
5729 if not removed_nodes:
5731 removed_nodes.clear()
5732 self._merge_order_bias(mygraph)
5733 def cmp_circular_bias(n1, n2):
5735 RDEPEND is stronger than PDEPEND and this function
5736 measures such a strength bias within a circular
5737 dependency relationship.
5739 n1_n2_medium = n2 in mygraph.child_nodes(n1,
5740 ignore_priority=priority_range.ignore_medium_soft)
5741 n2_n1_medium = n1 in mygraph.child_nodes(n2,
5742 ignore_priority=priority_range.ignore_medium_soft)
5743 if n1_n2_medium == n2_n1_medium:
5748 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
5750 # Contains uninstall tasks that have been scheduled to
5751 # occur after overlapping blockers have been installed.
5752 scheduled_uninstalls = set()
5753 # Contains any Uninstall tasks that have been ignored
5754 # in order to avoid the circular deps code path. These
5755 # correspond to blocker conflicts that could not be
5757 ignored_uninstall_tasks = set()
5758 have_uninstall_task = False
5759 complete = "complete" in self._dynamic_config.myparams
5762 def get_nodes(**kwargs):
5764 Returns leaf nodes excluding Uninstall instances
5765 since those should be executed as late as possible.
5767 return [node for node in mygraph.leaf_nodes(**kwargs) \
5768 if isinstance(node, Package) and \
5769 (node.operation != "uninstall" or \
5770 node in scheduled_uninstalls)]
5772 # sys-apps/portage needs special treatment if ROOT="/"
5773 running_root = self._frozen_config._running_root.root
5774 runtime_deps = InternalPackageSet(
5775 initial_atoms=[PORTAGE_PACKAGE_ATOM])
5776 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
5777 PORTAGE_PACKAGE_ATOM)
5778 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
5779 PORTAGE_PACKAGE_ATOM)
5782 running_portage = running_portage[0]
5784 running_portage = None
5786 if replacement_portage:
5787 replacement_portage = replacement_portage[0]
5789 replacement_portage = None
5791 if replacement_portage == running_portage:
5792 replacement_portage = None
5794 if running_portage is not None:
5796 portage_rdepend = self._select_atoms_highest_available(
5797 running_root, running_portage._metadata["RDEPEND"],
5798 myuse=self._pkg_use_enabled(running_portage),
5799 parent=running_portage, strict=False)
5800 except portage.exception.InvalidDependString as e:
5801 portage.writemsg("!!! Invalid RDEPEND in " + \
5802 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
5803 (running_root, running_portage.cpv, e), noiselevel=-1)
5805 portage_rdepend = {running_portage : []}
5806 for atoms in portage_rdepend.values():
5807 runtime_deps.update(atom for atom in atoms \
5808 if not atom.blocker)
5810 # Merge libc asap, in order to account for implicit
5811 # dependencies. See bug #303567.
5812 implicit_libc_roots = (running_root,)
5813 for root in implicit_libc_roots:
5815 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5816 graphdb = self._dynamic_config.mydbapi[root]
5817 for atom in self._expand_virt_from_graph(root,
5818 portage.const.LIBC_PACKAGE_ATOM):
5821 match = graphdb.match_pkgs(atom)
5825 if pkg.operation == "merge" and \
5826 not vardb.cpv_exists(pkg.cpv):
5830 # If there's also an os-headers upgrade, we need to
5831 # pull that in first. See bug #328317.
5832 for atom in self._expand_virt_from_graph(root,
5833 portage.const.OS_HEADERS_PACKAGE_ATOM):
5836 match = graphdb.match_pkgs(atom)
5840 if pkg.operation == "merge" and \
5841 not vardb.cpv_exists(pkg.cpv):
5842 asap_nodes.append(pkg)
5844 asap_nodes.extend(libc_pkgs)
5846 def gather_deps(ignore_priority, mergeable_nodes,
5847 selected_nodes, node):
5849 Recursively gather a group of nodes that RDEPEND on
5850 eachother. This ensures that they are merged as a group
5851 and get their RDEPENDs satisfied as soon as possible.
5853 if node in selected_nodes:
5855 if node not in mergeable_nodes:
5857 if node == replacement_portage and \
5858 mygraph.child_nodes(node,
5859 ignore_priority=priority_range.ignore_medium_soft):
5860 # Make sure that portage always has all of it's
5861 # RDEPENDs installed first.
5863 selected_nodes.add(node)
5864 for child in mygraph.child_nodes(node,
5865 ignore_priority=ignore_priority):
5866 if not gather_deps(ignore_priority,
5867 mergeable_nodes, selected_nodes, child):
5871 def ignore_uninst_or_med(priority):
5872 if priority is BlockerDepPriority.instance:
5874 return priority_range.ignore_medium(priority)
5876 def ignore_uninst_or_med_soft(priority):
5877 if priority is BlockerDepPriority.instance:
5879 return priority_range.ignore_medium_soft(priority)
5881 tree_mode = "--tree" in self._frozen_config.myopts
5882 # Tracks whether or not the current iteration should prefer asap_nodes
5883 # if available. This is set to False when the previous iteration
5884 # failed to select any nodes. It is reset whenever nodes are
5885 # successfully selected.
5888 # Controls whether or not the current iteration should drop edges that
5889 # are "satisfied" by installed packages, in order to solve circular
5890 # dependencies. The deep runtime dependencies of installed packages are
5891 # not checked in this case (bug #199856), so it must be avoided
5892 # whenever possible.
5893 drop_satisfied = False
5895 # State of variables for successive iterations that loosen the
5896 # criteria for node selection.
5898 # iteration prefer_asap drop_satisfied
5903 # If no nodes are selected on the last iteration, it is due to
5904 # unresolved blockers or circular dependencies.
5907 self._spinner_update()
5908 selected_nodes = None
5909 ignore_priority = None
5910 if drop_satisfied or (prefer_asap and asap_nodes):
5911 priority_range = DepPrioritySatisfiedRange
5913 priority_range = DepPriorityNormalRange
5914 if prefer_asap and asap_nodes:
5915 # ASAP nodes are merged before their soft deps. Go ahead and
5916 # select root nodes here if necessary, since it's typical for
5917 # the parent to have been removed from the graph already.
5918 asap_nodes = [node for node in asap_nodes \
5919 if mygraph.contains(node)]
5920 for i in range(priority_range.SOFT,
5921 priority_range.MEDIUM_SOFT + 1):
5922 ignore_priority = priority_range.ignore_priority[i]
5923 for node in asap_nodes:
5924 if not mygraph.child_nodes(node,
5925 ignore_priority=ignore_priority):
5926 selected_nodes = [node]
5927 asap_nodes.remove(node)
5932 if not selected_nodes and \
5933 not (prefer_asap and asap_nodes):
5934 for i in range(priority_range.NONE,
5935 priority_range.MEDIUM_SOFT + 1):
5936 ignore_priority = priority_range.ignore_priority[i]
5937 nodes = get_nodes(ignore_priority=ignore_priority)
5939 # If there is a mixture of merges and uninstalls,
5940 # do the uninstalls first.
5941 good_uninstalls = None
5943 good_uninstalls = []
5945 if node.operation == "uninstall":
5946 good_uninstalls.append(node)
5949 nodes = good_uninstalls
5953 if good_uninstalls or len(nodes) == 1 or \
5954 (ignore_priority is None and \
5955 not asap_nodes and not tree_mode):
5956 # Greedily pop all of these nodes since no
5957 # relationship has been ignored. This optimization
5958 # destroys --tree output, so it's disabled in tree
5960 selected_nodes = nodes
5962 # For optimal merge order:
5963 # * Only pop one node.
5964 # * Removing a root node (node without a parent)
5965 # will not produce a leaf node, so avoid it.
5966 # * It's normal for a selected uninstall to be a
5967 # root node, so don't check them for parents.
5969 prefer_asap_parents = (True, False)
5971 prefer_asap_parents = (False,)
5972 for check_asap_parent in prefer_asap_parents:
5973 if check_asap_parent:
5975 parents = mygraph.parent_nodes(node,
5976 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5977 if any(x in asap_nodes for x in parents):
5978 selected_nodes = [node]
5982 if mygraph.parent_nodes(node):
5983 selected_nodes = [node]
5990 if not selected_nodes:
5991 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
5993 mergeable_nodes = set(nodes)
5994 if prefer_asap and asap_nodes:
5996 # When gathering the nodes belonging to a runtime cycle,
5997 # we want to minimize the number of nodes gathered, since
5998 # this tends to produce a more optimal merge order.
5999 # Ignoring all medium_soft deps serves this purpose.
6000 # In the case of multiple runtime cycles, where some cycles
6001 # may depend on smaller independent cycles, it's optimal
6002 # to merge smaller independent cycles before other cycles
6003 # that depend on them. Therefore, we search for the
6004 # smallest cycle in order to try and identify and prefer
6005 # these smaller independent cycles.
6006 ignore_priority = priority_range.ignore_medium_soft
6007 smallest_cycle = None
6009 if not mygraph.parent_nodes(node):
6011 selected_nodes = set()
6012 if gather_deps(ignore_priority,
6013 mergeable_nodes, selected_nodes, node):
6014 # When selecting asap_nodes, we need to ensure
6015 # that we haven't selected a large runtime cycle
6016 # that is obviously sub-optimal. This will be
6017 # obvious if any of the non-asap selected_nodes
6018 # is a leaf node when medium_soft deps are
6020 if prefer_asap and asap_nodes and \
6021 len(selected_nodes) > 1:
6022 for node in selected_nodes.difference(
6024 if not mygraph.child_nodes(node,
6026 DepPriorityNormalRange.ignore_medium_soft):
6027 selected_nodes = None
6030 if smallest_cycle is None or \
6031 len(selected_nodes) < len(smallest_cycle):
6032 smallest_cycle = selected_nodes
6034 selected_nodes = smallest_cycle
6036 if selected_nodes and debug:
6037 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
6038 (len(selected_nodes),), noiselevel=-1)
6039 cycle_digraph = mygraph.copy()
6040 cycle_digraph.difference_update([x for x in
6041 cycle_digraph if x not in selected_nodes])
6042 cycle_digraph.debug_print()
6043 writemsg("\n", noiselevel=-1)
6045 if prefer_asap and asap_nodes and not selected_nodes:
6046 # We failed to find any asap nodes to merge, so ignore
6047 # them for the next iteration.
6051 if selected_nodes and ignore_priority is not None:
6052 # Try to merge ignored medium_soft deps as soon as possible
6053 # if they're not satisfied by installed packages.
6054 for node in selected_nodes:
6055 children = set(mygraph.child_nodes(node))
6056 soft = children.difference(
6057 mygraph.child_nodes(node,
6058 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
6059 medium_soft = children.difference(
6060 mygraph.child_nodes(node,
6062 DepPrioritySatisfiedRange.ignore_medium_soft))
6063 medium_soft.difference_update(soft)
6064 for child in medium_soft:
6065 if child in selected_nodes:
6067 if child in asap_nodes:
6069 # Merge PDEPEND asap for bug #180045.
6070 asap_nodes.append(child)
6072 if selected_nodes and len(selected_nodes) > 1:
6073 if not isinstance(selected_nodes, list):
6074 selected_nodes = list(selected_nodes)
6075 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
6077 if not selected_nodes and myblocker_uninstalls:
6078 # An Uninstall task needs to be executed in order to
6079 # avoid conflict if possible.
6082 priority_range = DepPrioritySatisfiedRange
6084 priority_range = DepPriorityNormalRange
6086 mergeable_nodes = get_nodes(
6087 ignore_priority=ignore_uninst_or_med)
6089 min_parent_deps = None
6092 for task in myblocker_uninstalls.leaf_nodes():
6093 # Do some sanity checks so that system or world packages
6094 # don't get uninstalled inappropriately here (only really
6095 # necessary when --complete-graph has not been enabled).
6097 if task in ignored_uninstall_tasks:
6100 if task in scheduled_uninstalls:
6101 # It's been scheduled but it hasn't
6102 # been executed yet due to dependence
6103 # on installation of blocking packages.
6106 root_config = self._frozen_config.roots[task.root]
6107 inst_pkg = self._pkg(task.cpv, "installed", root_config,
6110 if self._dynamic_config.digraph.contains(inst_pkg):
6113 forbid_overlap = False
6114 heuristic_overlap = False
6115 for blocker in myblocker_uninstalls.parent_nodes(task):
6116 if not eapi_has_strong_blocks(blocker.eapi):
6117 heuristic_overlap = True
6118 elif blocker.atom.blocker.overlap.forbid:
6119 forbid_overlap = True
6121 if forbid_overlap and running_root == task.root:
6124 if heuristic_overlap and running_root == task.root:
6125 # Never uninstall sys-apps/portage or it's essential
6126 # dependencies, except through replacement.
6128 runtime_dep_atoms = \
6129 list(runtime_deps.iterAtomsForPackage(task))
6130 except portage.exception.InvalidDependString as e:
6131 portage.writemsg("!!! Invalid PROVIDE in " + \
6132 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6133 (task.root, task.cpv, e), noiselevel=-1)
6137 # Don't uninstall a runtime dep if it appears
6138 # to be the only suitable one installed.
6140 vardb = root_config.trees["vartree"].dbapi
6141 for atom in runtime_dep_atoms:
6142 other_version = None
6143 for pkg in vardb.match_pkgs(atom):
6144 if pkg.cpv == task.cpv and \
6145 pkg.counter == task.counter:
6149 if other_version is None:
6155 # For packages in the system set, don't take
6156 # any chances. If the conflict can't be resolved
6157 # by a normal replacement operation then abort.
6160 for atom in root_config.sets[
6161 "system"].iterAtomsForPackage(task):
6164 except portage.exception.InvalidDependString as e:
6165 portage.writemsg("!!! Invalid PROVIDE in " + \
6166 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6167 (task.root, task.cpv, e), noiselevel=-1)
6173 # Note that the world check isn't always
6174 # necessary since self._complete_graph() will
6175 # add all packages from the system and world sets to the
6176 # graph. This just allows unresolved conflicts to be
6177 # detected as early as possible, which makes it possible
6178 # to avoid calling self._complete_graph() when it is
6179 # unnecessary due to blockers triggering an abortion.
6181 # For packages in the world set, go ahead an uninstall
6182 # when necessary, as long as the atom will be satisfied
6183 # in the final state.
6184 graph_db = self._dynamic_config.mydbapi[task.root]
6187 for atom in root_config.sets[
6188 "selected"].iterAtomsForPackage(task):
6190 for pkg in graph_db.match_pkgs(atom):
6197 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
6199 except portage.exception.InvalidDependString as e:
6200 portage.writemsg("!!! Invalid PROVIDE in " + \
6201 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6202 (task.root, task.cpv, e), noiselevel=-1)
6208 # Check the deps of parent nodes to ensure that
6209 # the chosen task produces a leaf node. Maybe
6210 # this can be optimized some more to make the
6211 # best possible choice, but the current algorithm
6212 # is simple and should be near optimal for most
6214 self._spinner_update()
6215 mergeable_parent = False
6217 parent_deps.add(task)
6218 for parent in mygraph.parent_nodes(task):
6219 parent_deps.update(mygraph.child_nodes(parent,
6220 ignore_priority=priority_range.ignore_medium_soft))
6221 if min_parent_deps is not None and \
6222 len(parent_deps) >= min_parent_deps:
6223 # This task is no better than a previously selected
6224 # task, so abort search now in order to avoid wasting
6225 # any more cpu time on this task. This increases
6226 # performance dramatically in cases when there are
6227 # hundreds of blockers to solve, like when
6228 # upgrading to a new slot of kde-meta.
6229 mergeable_parent = None
6231 if parent in mergeable_nodes and \
6232 gather_deps(ignore_uninst_or_med_soft,
6233 mergeable_nodes, set(), parent):
6234 mergeable_parent = True
6236 if not mergeable_parent:
6239 if min_parent_deps is None or \
6240 len(parent_deps) < min_parent_deps:
6241 min_parent_deps = len(parent_deps)
6244 if uninst_task is not None and min_parent_deps == 1:
6245 # This is the best possible result, so so abort search
6246 # now in order to avoid wasting any more cpu time.
6249 if uninst_task is not None:
6250 # The uninstall is performed only after blocking
6251 # packages have been merged on top of it. File
6252 # collisions between blocking packages are detected
6253 # and removed from the list of files to be uninstalled.
6254 scheduled_uninstalls.add(uninst_task)
6255 parent_nodes = mygraph.parent_nodes(uninst_task)
6257 # Reverse the parent -> uninstall edges since we want
6258 # to do the uninstall after blocking packages have
6259 # been merged on top of it.
6260 mygraph.remove(uninst_task)
6261 for blocked_pkg in parent_nodes:
6262 mygraph.add(blocked_pkg, uninst_task,
6263 priority=BlockerDepPriority.instance)
6264 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6265 scheduler_graph.add(blocked_pkg, uninst_task,
6266 priority=BlockerDepPriority.instance)
6268 # Sometimes a merge node will render an uninstall
6269 # node unnecessary (due to occupying the same SLOT),
6270 # and we want to avoid executing a separate uninstall
6271 # task in that case.
6272 slot_node = self._dynamic_config.mydbapi[uninst_task.root
6273 ].match_pkgs(uninst_task.slot_atom)
6275 slot_node[0].operation == "merge":
6276 mygraph.add(slot_node[0], uninst_task,
6277 priority=BlockerDepPriority.instance)
6279 # Reset the state variables for leaf node selection and
6280 # continue trying to select leaf nodes.
6282 drop_satisfied = False
6285 if not selected_nodes:
6286 # Only select root nodes as a last resort. This case should
6287 # only trigger when the graph is nearly empty and the only
6288 # remaining nodes are isolated (no parents or children). Since
6289 # the nodes must be isolated, ignore_priority is not needed.
6290 selected_nodes = get_nodes()
6292 if not selected_nodes and not drop_satisfied:
6293 drop_satisfied = True
6296 if not selected_nodes and myblocker_uninstalls:
6297 # If possible, drop an uninstall task here in order to avoid
6298 # the circular deps code path. The corresponding blocker will
6299 # still be counted as an unresolved conflict.
6301 for node in myblocker_uninstalls.leaf_nodes():
6303 mygraph.remove(node)
6308 ignored_uninstall_tasks.add(node)
6311 if uninst_task is not None:
6312 # Reset the state variables for leaf node selection and
6313 # continue trying to select leaf nodes.
6315 drop_satisfied = False
6318 if not selected_nodes:
6319 self._dynamic_config._circular_deps_for_display = mygraph
6320 self._dynamic_config._skip_restart = True
6321 raise self._unknown_internal_error()
6323 # At this point, we've succeeded in selecting one or more nodes, so
6324 # reset state variables for leaf node selection.
6326 drop_satisfied = False
6328 mygraph.difference_update(selected_nodes)
6330 for node in selected_nodes:
6331 if isinstance(node, Package) and \
6332 node.operation == "nomerge":
6335 # Handle interactions between blockers
6336 # and uninstallation tasks.
6337 solved_blockers = set()
6339 if isinstance(node, Package) and \
6340 "uninstall" == node.operation:
6341 have_uninstall_task = True
6344 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
6345 inst_pkg = vardb.match_pkgs(node.slot_atom)
6347 # The package will be replaced by this one, so remove
6348 # the corresponding Uninstall task if necessary.
6349 inst_pkg = inst_pkg[0]
6350 uninst_task = Package(built=inst_pkg.built,
6351 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6352 metadata=inst_pkg._metadata,
6353 operation="uninstall",
6354 root_config=inst_pkg.root_config,
6355 type_name=inst_pkg.type_name)
6357 mygraph.remove(uninst_task)
6361 if uninst_task is not None and \
6362 uninst_task not in ignored_uninstall_tasks and \
6363 myblocker_uninstalls.contains(uninst_task):
6364 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6365 myblocker_uninstalls.remove(uninst_task)
6366 # Discard any blockers that this Uninstall solves.
6367 for blocker in blocker_nodes:
6368 if not myblocker_uninstalls.child_nodes(blocker):
6369 myblocker_uninstalls.remove(blocker)
6371 self._dynamic_config._unsolvable_blockers:
6372 solved_blockers.add(blocker)
6374 retlist.append(node)
6376 if (isinstance(node, Package) and \
6377 "uninstall" == node.operation) or \
6378 (uninst_task is not None and \
6379 uninst_task in scheduled_uninstalls):
6380 # Include satisfied blockers in the merge list
6381 # since the user might be interested and also
6382 # it serves as an indicator that blocking packages
6383 # will be temporarily installed simultaneously.
6384 for blocker in solved_blockers:
6385 retlist.append(blocker)
6387 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
6388 for node in myblocker_uninstalls.root_nodes():
6389 unsolvable_blockers.add(node)
6391 # If any Uninstall tasks need to be executed in order
6392 # to avoid a conflict, complete the graph with any
6393 # dependencies that may have been initially
6394 # neglected (to ensure that unsafe Uninstall tasks
6395 # are properly identified and blocked from execution).
6396 if have_uninstall_task and \
6398 not unsolvable_blockers:
6399 self._dynamic_config.myparams["complete"] = True
6400 if '--debug' in self._frozen_config.myopts:
6402 msg.append("enabling 'complete' depgraph mode " + \
6403 "due to uninstall task(s):")
6405 for node in retlist:
6406 if isinstance(node, Package) and \
6407 node.operation == 'uninstall':
6408 msg.append("\t%s" % (node,))
6409 writemsg_level("\n%s\n" % \
6410 "".join("%s\n" % line for line in msg),
6411 level=logging.DEBUG, noiselevel=-1)
6412 raise self._serialize_tasks_retry("")
6414 # Set satisfied state on blockers, but not before the
6415 # above retry path, since we don't want to modify the
6416 # state in that case.
6417 for node in retlist:
6418 if isinstance(node, Blocker):
6419 node.satisfied = True
6421 for blocker in unsolvable_blockers:
6422 retlist.append(blocker)
6424 if unsolvable_blockers and \
6425 not self._accept_blocker_conflicts():
6426 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
6427 self._dynamic_config._serialized_tasks_cache = retlist[:]
6428 self._dynamic_config._scheduler_graph = scheduler_graph
6429 self._dynamic_config._skip_restart = True
6430 raise self._unknown_internal_error()
6432 if self._dynamic_config._slot_collision_info and \
6433 not self._accept_blocker_conflicts():
6434 self._dynamic_config._serialized_tasks_cache = retlist[:]
6435 self._dynamic_config._scheduler_graph = scheduler_graph
6436 raise self._unknown_internal_error()
6438 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	"""
	Report an unresolvable dependency cycle: force verbose/tree
	output, display the merge list containing the cycle, then print
	the cycle details plus any USE-flag suggestions for breaking it.
	"""
	self._dynamic_config._circular_dependency_handler = \
		circular_dependency_handler(self, mygraph)
	handler = self._dynamic_config._circular_dependency_handler

	# Force maximum visibility for the error display.
	self._frozen_config.myopts.pop("--quiet", None)
	self._frozen_config.myopts["--verbose"] = True
	self._frozen_config.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(handler.merge_list)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
		noiselevel=-1)
	portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is None:
		handler.debug_print()
		portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is not None:
		portage.writemsg(handler.circular_dep_message, noiselevel=-1)

	suggestions = handler.suggestions
	if suggestions:
		writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
		if len(suggestions) == 1:
			writemsg("by applying the following change:\n", noiselevel=-1)
		else:
			writemsg("by applying " + colorize("bold", "any of") + \
				" the following changes:\n", noiselevel=-1)
		writemsg("".join(suggestions), noiselevel=-1)
		writemsg("\nNote that this change can be reverted, once the package has" + \
			" been installed.\n", noiselevel=-1)
		if handler.large_cycle_count:
			writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
				"Several changes might be required to resolve all cycles.\n" + \
				"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
	else:
		writemsg("\n\n", noiselevel=-1)
		writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Display the cached merge list, unless the exact same list (in
	original or reversed order) has already been displayed.
	"""
	tasks = self._dynamic_config._serialized_tasks_cache
	if tasks is None:
		return
	shown = self._dynamic_config._displayed_list
	if shown is not None and \
		(shown == tasks or
		shown == list(reversed(tasks))):
		# Nothing new to show.
		return
	display_list = tasks[:]
	if "--tree" in self._frozen_config.myopts:
		# Tree mode prints the list root-first.
		display_list.reverse()
	self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""
	Explain unsatisfied blockers to the user: show the merge list,
	then list each conflicting package together with the parents
	(and atoms) that pulled it in.
	"""
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for line in textwrap.wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Display the conflicting packages along with the packages
	# that pulled them in. This is helpful for troubleshooting
	# cases in which blockers don't solve automatically and
	# the reasons are not apparent from the normal merge list
	# display.
	conflict_pkgs = {}
	for blocker in blockers:
		for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
			self._dynamic_config._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				# Fall back to the world-set atom that keeps the
				# blocked package installed, if any.
				atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
				if atom is not None:
					parent_atoms = set([("@selected", atom)])
			if parent_atoms:
				conflict_pkgs[pkg] = parent_atoms

	if conflict_pkgs:
		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		pruned_pkgs = set()
		for pkg, parent_atoms in conflict_pkgs.items():
			relevant_parent = any(parent not in conflict_pkgs
				for parent, atom in parent_atoms)
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

	if conflict_pkgs:
		msg = []
		indent = "  "
		for pkg, parent_atoms in conflict_pkgs.items():

			# Prefer packages that are not directly involved in a conflict.
			# It can be essential to see all the packages here, so don't
			# omit any. If the list is long, people can simply use a pager.
			preferred_parents = set()
			for parent_atom in parent_atoms:
				parent, atom = parent_atom
				if parent not in conflict_pkgs:
					preferred_parents.add(parent_atom)

			ordered_list = list(preferred_parents)
			if len(parent_atoms) > len(ordered_list):
				# Append the conflict-involved parents after the
				# preferred ones.
				for parent_atom in parent_atoms:
					if parent_atom not in preferred_parents:
						ordered_list.append(parent_atom)

			msg.append(indent + "%s pulled in by\n" % pkg)

			for parent_atom in ordered_list:
				parent, atom = parent_atom
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
				else:
					# Display the specific atom from SetArg or
					# Package types.
					msg.append("%s required by %s" % (atom, parent))
				msg.append("\n")

			msg.append("\n")

		writemsg("".join(msg), noiselevel=-1)

	if "--quiet" not in self._frozen_config.myopts:
		show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
	"""
	Display the given merge list via the resolver output module.

	@param mylist: list of tasks/packages to display
	@param favorites: optional list of favorite atoms; defaults to an
		empty list (a None sentinel replaces the previous mutable
		default argument, which was shared across all calls)
	@param verbosity: optional verbosity override
	"""
	if favorites is None:
		favorites = []

	# This is used to prevent display_problems() from
	# redundantly displaying this exact same merge list
	# again via _show_merge_list().
	self._dynamic_config._displayed_list = mylist

	# Delegates to the module-level display() function, which the
	# method name shadows only in class scope.
	return display(self, mylist, favorites, verbosity)
6593 def _display_autounmask(self):
6595 Display --autounmask message and optionally write it to config files
6596 (using CONFIG_PROTECT). The message includes the comments and the changes.
6599 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
6600 autounmask_unrestricted_atoms = \
6601 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
6602 quiet = "--quiet" in self._frozen_config.myopts
6603 pretend = "--pretend" in self._frozen_config.myopts
6604 ask = "--ask" in self._frozen_config.myopts
6605 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
def check_if_latest(pkg):
	"""
	Return a (is_latest, is_latest_in_slot) pair indicating whether
	any higher version of pkg is visible overall, and within pkg's
	slot, across the filtered package databases for pkg's root.
	"""
	is_latest = True
	is_latest_in_slot = True
	dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
	root_config = self._frozen_config.roots[pkg.root]

	for db, pkg_type, built, installed, db_keys in dbs:
		for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
			if other_pkg.cp != pkg.cp:
				# old-style PROVIDE virtual means there are no
				# normal matches for this pkg_type
				break
			if other_pkg > pkg:
				is_latest = False
				if other_pkg.slot_atom == pkg.slot_atom:
					is_latest_in_slot = False
					break
			else:
				# iter_match_pkgs yields highest version first, so
				# there's no need to search this pkg_type any further
				break

		if not is_latest_in_slot:
			# No point scanning the remaining dbs.
			break

	return is_latest, is_latest_in_slot
6634 #Set of roots we have autounmask changes for.
6637 masked_by_missing_keywords = False
6638 unstable_keyword_msg = {}
6639 for pkg in self._dynamic_config._needed_unstable_keywords:
6640 self._show_merge_list()
6641 if pkg in self._dynamic_config.digraph:
6644 unstable_keyword_msg.setdefault(root, [])
6645 is_latest, is_latest_in_slot = check_if_latest(pkg)
6646 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6647 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6648 use=self._pkg_use_enabled(pkg))
6649 for reason in mreasons:
6650 if reason.unmask_hint and \
6651 reason.unmask_hint.key == 'unstable keyword':
6652 keyword = reason.unmask_hint.value
6654 masked_by_missing_keywords = True
6656 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
6657 if autounmask_unrestricted_atoms:
6659 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
6660 elif is_latest_in_slot:
6661 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
6663 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6665 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6667 p_mask_change_msg = {}
6668 for pkg in self._dynamic_config._needed_p_mask_changes:
6669 self._show_merge_list()
6670 if pkg in self._dynamic_config.digraph:
6673 p_mask_change_msg.setdefault(root, [])
6674 is_latest, is_latest_in_slot = check_if_latest(pkg)
6675 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6676 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6677 use=self._pkg_use_enabled(pkg))
6678 for reason in mreasons:
6679 if reason.unmask_hint and \
6680 reason.unmask_hint.key == 'p_mask':
6681 keyword = reason.unmask_hint.value
6683 comment, filename = portage.getmaskingreason(
6684 pkg.cpv, metadata=pkg._metadata,
6685 settings=pkgsettings,
6686 portdb=pkg.root_config.trees["porttree"].dbapi,
6687 return_location=True)
6689 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
6691 p_mask_change_msg[root].append("# %s:\n" % filename)
6693 comment = [line for line in
6694 comment.splitlines() if line]
6695 for line in comment:
6696 p_mask_change_msg[root].append("%s\n" % line)
6697 if autounmask_unrestricted_atoms:
6699 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
6700 elif is_latest_in_slot:
6701 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
6703 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6705 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6707 use_changes_msg = {}
6708 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
6709 self._show_merge_list()
6710 if pkg in self._dynamic_config.digraph:
6713 use_changes_msg.setdefault(root, [])
6714 is_latest, is_latest_in_slot = check_if_latest(pkg)
6715 changes = needed_use_config_change[1]
6717 for flag, state in changes.items():
6719 adjustments.append(flag)
6721 adjustments.append("-" + flag)
6722 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
6724 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6725 elif is_latest_in_slot:
6726 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
6728 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6731 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
6732 self._show_merge_list()
6733 if pkg in self._dynamic_config.digraph:
6736 license_msg.setdefault(root, [])
6737 is_latest, is_latest_in_slot = check_if_latest(pkg)
6739 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
6741 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6742 elif is_latest_in_slot:
6743 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
6745 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
def find_config_file(abs_user_config, file_name):
	"""
	Searches /etc/portage for an appropriate file to append changes to.
	If the file_name is a file it is returned, if it is a directory, the
	last file in it is returned. Order of traversal is the identical to
	portage.util.grablines(recursive=True).

	file_name - String containing a file name like "package.use"
	return value - String. Absolute path of file to write to. None if
	no suitable file exists.
	"""
	file_path = os.path.join(abs_user_config, file_name)

	try:
		os.lstat(file_path)
	except OSError as e:
		if e.errno == errno.ENOENT:
			# The file doesn't exist, so we'll
			# simply create it.
			return file_path

		# Disk or file system trouble?
		return None

	# Depth-first traversal; the stack is popped from the end and
	# directory contents are pushed in reverse-sorted order, so the
	# last regular file seen matches grablines(recursive=True) order.
	last_file_path = None
	stack = [file_path]
	while stack:
		p = stack.pop()
		try:
			st = os.stat(p)
		except OSError:
			# Unreadable entry: skip it silently, like grablines.
			pass
		else:
			if stat.S_ISREG(st.st_mode):
				last_file_path = p
			elif stat.S_ISDIR(st.st_mode):
				if os.path.basename(p) in _ignorecvs_dirs:
					continue
				try:
					contents = os.listdir(p)
				except OSError:
					pass
				else:
					contents.sort(reverse=True)
					for child in contents:
						if child.startswith(".") or \
							child.endswith("~"):
							# Skip hidden and backup files.
							continue
						stack.append(os.path.join(p, child))

	return last_file_path
6799 write_to_file = autounmask_write and not pretend
6800 #Make sure we have a file to write to before doing any write.
6801 file_to_write_to = {}
6805 settings = self._frozen_config.roots[root].settings
6806 abs_user_config = os.path.join(
6807 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6809 if root in unstable_keyword_msg:
6810 if not os.path.exists(os.path.join(abs_user_config,
6811 "package.keywords")):
6812 filename = "package.accept_keywords"
6814 filename = "package.keywords"
6815 file_to_write_to[(abs_user_config, "package.keywords")] = \
6816 find_config_file(abs_user_config, filename)
6818 if root in p_mask_change_msg:
6819 file_to_write_to[(abs_user_config, "package.unmask")] = \
6820 find_config_file(abs_user_config, "package.unmask")
6822 if root in use_changes_msg:
6823 file_to_write_to[(abs_user_config, "package.use")] = \
6824 find_config_file(abs_user_config, "package.use")
6826 if root in license_msg:
6827 file_to_write_to[(abs_user_config, "package.license")] = \
6828 find_config_file(abs_user_config, "package.license")
6830 for (abs_user_config, f), path in file_to_write_to.items():
6832 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
6834 write_to_file = not problems
def format_msg(lines):
	# Return the lines joined into one string, colorizing every
	# non-comment line (normalizing its trailing whitespace to a
	# single newline). Comment lines pass through untouched and the
	# input list is never mutated.
	formatted = []
	for line in lines:
		if line.startswith("#"):
			formatted.append(line)
		else:
			formatted.append(colorize("INFORM", line.rstrip()) + "\n")
	return "".join(formatted)
6845 settings = self._frozen_config.roots[root].settings
6846 abs_user_config = os.path.join(
6847 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6850 writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
def _writemsg(reason, file):
	# Announce a class of required config changes and point the
	# user at the relevant portage(5) man page section.
	template = ('\nThe following %s are necessary to proceed:\n'
		' (see "%s" in the portage(5) man page for more details)\n')
	writemsg(template % (colorize('BAD', reason), file),
		noiselevel=-1)
6857 if root in unstable_keyword_msg:
6858 _writemsg('keyword changes', 'package.accept_keywords')
6859 writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
6861 if root in p_mask_change_msg:
6862 _writemsg('mask changes', 'package.unmask')
6863 writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
6865 if root in use_changes_msg:
6866 _writemsg('USE changes', 'package.use')
6867 writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
6869 if root in license_msg:
6870 _writemsg('license changes', 'package.license')
6871 writemsg(format_msg(license_msg[root]), noiselevel=-1)
6876 settings = self._frozen_config.roots[root].settings
6877 protect_obj[root] = ConfigProtect(settings["EROOT"], \
6878 shlex_split(settings.get("CONFIG_PROTECT", "")),
6879 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
def write_changes(root, changes, file_to_write_to):
	# Append the given change lines to file_to_write_to, honoring
	# CONFIG_PROTECT for the given root. Read failures other than
	# ENOENT, and any write failure, are recorded in the enclosing
	# scope's problems list instead of raising.
	file_contents = None
	try:
		with io.open(
			_unicode_encode(file_to_write_to,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'],
			errors='replace') as f:
			file_contents = f.readlines()
	except IOError as e:
		if e.errno == errno.ENOENT:
			# A missing file is fine: start from empty content.
			file_contents = []
		else:
			problems.append("!!! Failed to read '%s': %s\n" % \
				(file_to_write_to, e))
	if file_contents is not None:
		file_contents.extend(changes)
		if protect_obj[root].isprotected(file_to_write_to):
			# We want to force new_protect_filename to ensure
			# that the user will see all our changes via
			# dispatch-conf, even if file_to_write_to doesn't
			# exist yet, so we specify force=True.
			file_to_write_to = new_protect_filename(
				file_to_write_to, force=True)
		try:
			write_atomic(file_to_write_to, "".join(file_contents))
		except PortageException:
			problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
6910 if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
6913 "NOTE: The --autounmask-keep-masks option will prevent emerge",
6914 " from creating package.unmask or ** keyword changes."
6918 line = colorize("INFORM", line)
6919 writemsg(line + "\n", noiselevel=-1)
6921 if ask and write_to_file and file_to_write_to:
6922 prompt = "\nWould you like to add these " + \
6923 "changes to your config files?"
6924 if userquery(prompt, enter_invalid) == 'No':
6925 write_to_file = False
6927 if write_to_file and file_to_write_to:
6929 settings = self._frozen_config.roots[root].settings
6930 abs_user_config = os.path.join(
6931 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6932 ensure_dirs(abs_user_config)
6934 if root in unstable_keyword_msg:
6935 write_changes(root, unstable_keyword_msg[root],
6936 file_to_write_to.get((abs_user_config, "package.keywords")))
6938 if root in p_mask_change_msg:
6939 write_changes(root, p_mask_change_msg[root],
6940 file_to_write_to.get((abs_user_config, "package.unmask")))
6942 if root in use_changes_msg:
6943 write_changes(root, use_changes_msg[root],
6944 file_to_write_to.get((abs_user_config, "package.use")))
6946 if root in license_msg:
6947 write_changes(root, license_msg[root],
6948 file_to_write_to.get((abs_user_config, "package.license")))
6951 writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
6953 writemsg("".join(problems), noiselevel=-1)
6954 elif write_to_file and roots:
6955 writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
6957 elif not pretend and not autounmask_write and roots:
6958 writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
6959 "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
6960 "paying special attention to mask or keyword changes that may expose\n"
6961 "experimental or unstable packages.\n",
6965 def display_problems(self):
6967 Display problems with the dependency graph such as slot collisions.
6968 This is called internally by display() to show the problems _after_
6969 the merge list where it is most likely to be seen, but if display()
6970 is not going to be called then this method should be called explicitly
6971 to ensure that the user is notified of problems with the graph.
6974 if self._dynamic_config._circular_deps_for_display is not None:
6975 self._show_circular_deps(
6976 self._dynamic_config._circular_deps_for_display)
6978 # The slot conflict display has better noise reduction than
6979 # the unsatisfied blockers display, so skip unsatisfied blockers
6980 # display if there are slot conflicts (see bug #385391).
6981 if self._dynamic_config._slot_collision_info:
6982 self._show_slot_collision_notice()
6983 elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
6984 self._show_unsatisfied_blockers(
6985 self._dynamic_config._unsatisfied_blockers_for_display)
6987 self._show_missed_update()
6989 self._show_ignored_binaries()
6991 self._display_autounmask()
6993 # TODO: Add generic support for "set problem" handlers so that
6994 # the below warnings aren't special cases for world only.
6996 if self._dynamic_config._missing_args:
6997 world_problems = False
6998 if "world" in self._dynamic_config.sets[
6999 self._frozen_config.target_root].sets:
7000 # Filter out indirect members of world (from nested sets)
7001 # since only direct members of world are desired here.
7002 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
7003 for arg, atom in self._dynamic_config._missing_args:
7004 if arg.name in ("selected", "world") and atom in world_set:
7005 world_problems = True
7009 sys.stderr.write("\n!!! Problems have been " + \
7010 "detected with your world file\n")
7011 sys.stderr.write("!!! Please run " + \
7012 green("emaint --check world")+"\n\n")
7014 if self._dynamic_config._missing_args:
7015 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7016 " Ebuilds for the following packages are either all\n")
7017 sys.stderr.write(colorize("BAD", "!!!") + \
7018 " masked or don't exist:\n")
7019 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7020 self._dynamic_config._missing_args) + "\n")
7022 if self._dynamic_config._pprovided_args:
7024 for arg, atom in self._dynamic_config._pprovided_args:
7025 if isinstance(arg, SetArg):
7027 arg_atom = (atom, atom)
7030 arg_atom = (arg.arg, atom)
7031 refs = arg_refs.setdefault(arg_atom, [])
7032 if parent not in refs:
7035 msg.append(bad("\nWARNING: "))
7036 if len(self._dynamic_config._pprovided_args) > 1:
7037 msg.append("Requested packages will not be " + \
7038 "merged because they are listed in\n")
7040 msg.append("A requested package will not be " + \
7041 "merged because it is listed in\n")
7042 msg.append("package.provided:\n\n")
7043 problems_sets = set()
7044 for (arg, atom), refs in arg_refs.items():
7047 problems_sets.update(refs)
7049 ref_string = ", ".join(["'%s'" % name for name in refs])
7050 ref_string = " pulled in by " + ref_string
7051 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7053 if "selected" in problems_sets or "world" in problems_sets:
7054 msg.append("This problem can be solved in one of the following ways:\n\n")
7055 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
7056 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
7057 msg.append(" C) Remove offending entries from package.provided.\n\n")
7058 msg.append("The best course of action depends on the reason that an offending\n")
7059 msg.append("package.provided entry exists.\n\n")
7060 sys.stderr.write("".join(msg))
7062 masked_packages = []
7063 for pkg in self._dynamic_config._masked_license_updates:
7064 root_config = pkg.root_config
7065 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
7066 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
7067 masked_packages.append((root_config, pkgsettings,
7068 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7070 writemsg("\n" + colorize("BAD", "!!!") + \
7071 " The following updates are masked by LICENSE changes:\n",
7073 show_masked_packages(masked_packages)
7075 writemsg("\n", noiselevel=-1)
7077 masked_packages = []
7078 for pkg in self._dynamic_config._masked_installed:
7079 root_config = pkg.root_config
7080 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
7081 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
7082 masked_packages.append((root_config, pkgsettings,
7083 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7085 writemsg("\n" + colorize("BAD", "!!!") + \
7086 " The following installed packages are masked:\n",
7088 show_masked_packages(masked_packages)
7090 writemsg("\n", noiselevel=-1)
7092 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7093 self._show_unsatisfied_dep(*pargs,
7094 **portage._native_kwargs(kwargs))
7096 def saveNomergeFavorites(self):
7097 """Find atoms in favorites that are not in the mergelist and add them
7098 to the world file if necessary."""
7099 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7100 "--oneshot", "--onlydeps", "--pretend"):
7101 if x in self._frozen_config.myopts:
7103 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7104 world_set = root_config.sets["selected"]
7106 world_locked = False
7107 if hasattr(world_set, "lock"):
7111 if hasattr(world_set, "load"):
7112 world_set.load() # maybe it's changed on disk
7114 args_set = self._dynamic_config.sets[
7115 self._frozen_config.target_root].sets['__non_set_args__']
7116 added_favorites = set()
7117 for x in self._dynamic_config._set_nodes:
7118 if x.operation != "nomerge":
7121 if x.root != root_config.root:
7125 myfavkey = create_world_atom(x, args_set, root_config)
7127 if myfavkey in added_favorites:
7129 added_favorites.add(myfavkey)
7130 except portage.exception.InvalidDependString as e:
7131 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7132 (x.cpv, e), noiselevel=-1)
7133 writemsg("!!! see '%s'\n\n" % os.path.join(
7134 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
7137 for arg in self._dynamic_config._initial_arg_list:
7138 if not isinstance(arg, SetArg):
7140 if arg.root_config.root != root_config.root:
7146 if k in ("selected", "world") or \
7147 not root_config.sets[k].world_candidate:
7152 all_added.append(SETPREFIX + k)
7153 all_added.extend(added_favorites)
7157 if "--ask" in self._frozen_config.myopts:
7158 writemsg_stdout("\n", noiselevel=-1)
7160 writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
7162 writemsg_stdout("\n", noiselevel=-1)
7163 prompt = "Would you like to add these packages to your world " \
7165 enter_invalid = '--ask-enter-invalid' in \
7166 self._frozen_config.myopts
7167 if userquery(prompt, enter_invalid) == "No":
7172 if a.startswith(SETPREFIX):
7173 filename = "world_sets"
7177 ">>> Recording %s in \"%s\" favorites file...\n" %
7178 (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
7179 world_set.update(all_added)
7184 def _loadResumeCommand(self, resume_data, skip_masked=True,
7187 Add a resume command to the graph and validate it in the process. This
7188 will raise a PackageNotFound exception if a package is not available.
7193 if not isinstance(resume_data, dict):
7196 mergelist = resume_data.get("mergelist")
7197 if not isinstance(mergelist, list):
7200 favorites = resume_data.get("favorites")
7201 if isinstance(favorites, list):
7202 args = self._load_favorites(favorites)
7206 fakedb = self._dynamic_config.mydbapi
7207 serialized_tasks = []
7210 if not (isinstance(x, list) and len(x) == 4):
7212 pkg_type, myroot, pkg_key, action = x
7213 if pkg_type not in self.pkg_tree_map:
7215 if action != "merge":
7217 root_config = self._frozen_config.roots[myroot]
7219 # Use the resume "favorites" list to see if a repo was specified
7221 depgraph_sets = self._dynamic_config.sets[root_config.root]
7223 for atom in depgraph_sets.atoms.getAtoms():
7224 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
7228 atom = "=" + pkg_key
7230 atom = atom + _repo_separator + repo
7233 atom = Atom(atom, allow_repo=True)
7238 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
7239 if not self._pkg_visibility_check(pkg) or \
7240 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
7241 modified_use=self._pkg_use_enabled(pkg)):
7246 # It does not exist or it is corrupt.
7248 # TODO: log these somewhere
7250 raise portage.exception.PackageNotFound(pkg_key)
7252 if "merge" == pkg.operation and \
7253 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
7254 modified_use=self._pkg_use_enabled(pkg)):
7257 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
7259 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
7261 self._dynamic_config._unsatisfied_deps_for_display.append(
7262 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
7264 fakedb[myroot].cpv_inject(pkg)
7265 serialized_tasks.append(pkg)
7266 self._spinner_update()
7268 if self._dynamic_config._unsatisfied_deps_for_display:
7271 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
7272 self._dynamic_config._serialized_tasks_cache = serialized_tasks
7273 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
7275 self._select_package = self._select_pkg_from_graph
7276 self._dynamic_config.myparams["selective"] = True
7277 # Always traverse deep dependencies in order to account for
7278 # potentially unsatisfied dependencies of installed packages.
7279 # This is necessary for correct --keep-going or --resume operation
7280 # in case a package from a group of circularly dependent packages
7281 # fails. In this case, a package which has recently been installed
7282 # may have an unsatisfied circular dependency (pulled in by
7283 # PDEPEND, for example). So, even though a package is already
7284 # installed, it may not have all of its dependencies satisfied, so
7285 # it may not be usable. If such a package is in the subgraph of
7286 # deep dependencies of a scheduled build, that build needs to
7287 # be cancelled. In order for this type of situation to be
7288 # recognized, deep traversal of dependencies is required.
7289 self._dynamic_config.myparams["deep"] = True
7291 for task in serialized_tasks:
7292 if isinstance(task, Package) and \
7293 task.operation == "merge":
7294 if not self._add_pkg(task, None):
7297 # Packages for argument atoms need to be explicitly
7298 # added via _add_pkg() so that they are included in the
7299 # digraph (needed at least for --tree display).
7300 for arg in self._expand_set_args(args, add_to_digraph=True):
7301 for atom in arg.pset.getAtoms():
7302 pkg, existing_node = self._select_package(
7303 arg.root_config.root, atom)
7304 if existing_node is None and \
7306 if not self._add_pkg(pkg, Dependency(atom=atom,
7307 root=pkg.root, parent=arg)):
7310 # Allow unsatisfied deps here to avoid showing a masking
7311 # message for an unsatisfied dep that isn't necessarily
7313 if not self._create_graph(allow_unsatisfied=True):
7316 unsatisfied_deps = []
7317 for dep in self._dynamic_config._unsatisfied_deps:
7318 if not isinstance(dep.parent, Package):
7320 if dep.parent.operation == "merge":
7321 unsatisfied_deps.append(dep)
7324 # For unsatisfied deps of installed packages, only account for
7325 # them if they are in the subgraph of dependencies of a package
7326 # which is scheduled to be installed.
7327 unsatisfied_install = False
7329 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
7331 node = dep_stack.pop()
7332 if not isinstance(node, Package):
7334 if node.operation == "merge":
7335 unsatisfied_install = True
7337 if node in traversed:
7340 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
7342 if unsatisfied_install:
7343 unsatisfied_deps.append(dep)
7345 if masked_tasks or unsatisfied_deps:
7346 # This probably means that a required package
7347 # was dropped via --skipfirst. It makes the
7348 # resume list invalid, so convert it to a
7349 # UnsatisfiedResumeDep exception.
7350 raise self.UnsatisfiedResumeDep(self,
7351 masked_tasks + unsatisfied_deps)
7352 self._dynamic_config._serialized_tasks_cache = None
7355 except self._unknown_internal_error:
7360 def _load_favorites(self, favorites):
7362 Use a list of favorites to resume state from a
7363 previous select_files() call. This creates similar
7364 DependencyArg instances to those that would have
7365 been created by the original select_files() call.
7366 This allows Package instances to be matched with
7367 DependencyArg instances during graph creation.
7369 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7370 sets = root_config.sets
7371 depgraph_sets = self._dynamic_config.sets[root_config.root]
7374 if not isinstance(x, basestring):
7376 if x in ("system", "world"):
7378 if x.startswith(SETPREFIX):
7379 s = x[len(SETPREFIX):]
7382 if s in depgraph_sets.sets:
7385 depgraph_sets.sets[s] = pset
7386 args.append(SetArg(arg=x, pset=pset,
7387 root_config=root_config))
7390 x = Atom(x, allow_repo=True)
7391 except portage.exception.InvalidAtom:
7393 args.append(AtomArg(arg=x, atom=x,
7394 root_config=root_config))
7396 self._set_args(args)
7399 class UnsatisfiedResumeDep(portage.exception.PortageException):
7401 A dependency of a resume list is not installed. This
7402 can occur when a required package is dropped from the
7403 merge list via --skipfirst.
7405 def __init__(self, depgraph, value):
7406 portage.exception.PortageException.__init__(self, value)
7407 self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for the depgraph's internal exception types."""
	def __init__(self, value=""):
		PortageException.__init__(self, value)
7413 class _unknown_internal_error(_internal_exception):
7415 Used by the depgraph internally to terminate graph creation.
7416 The specific reason for the failure should have been dumped
7417 to stderr, unfortunately, the exact reason for the failure
7421 class _serialize_tasks_retry(_internal_exception):
7423 This is raised by the _serialize_tasks() method when it needs to
7424 be called again for some reason. The only case that it's currently
7425 used for is when neglected dependencies need to be added to the
7426 graph in order to avoid making a potentially unsafe decision.
7429 class _backtrack_mask(_internal_exception):
7431 This is raised by _show_unsatisfied_dep() when it's called with
7432 check_backtrack=True and a matching package has been masked by
7436 class _autounmask_breakage(_internal_exception):
7438 This is raised by _show_unsatisfied_dep() when it's called with
7439 check_autounmask_breakage=True and a matching package has been
7440 been disqualified due to autounmask changes.
def need_restart(self):
	"""Return True when a restart has been requested and restarting
	has not been explicitly suppressed."""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_restart and not dynamic_config._skip_restart
def success_without_autounmask(self):
	"""Return the flag recording that the calculation would have
	succeeded without any autounmask changes."""
	state = self._dynamic_config
	return state._success_without_autounmask
7450 def autounmask_breakage_detected(self):
7452 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7453 self._show_unsatisfied_dep(
7454 *pargs, check_autounmask_breakage=True,
7455 **portage._native_kwargs(kwargs))
7456 except self._autounmask_breakage:
def get_backtrack_infos(self):
	"""Expose the backtracking information collected during this run."""
	state = self._dynamic_config
	return state._backtrack_infos
7464 class _dep_check_composite_db(dbapi):
7466 A dbapi-like interface that is optimized for use in dep_check() calls.
7467 This is built on top of the existing depgraph package selection logic.
7468 Some packages that have been added to the graph may be masked from this
7469 view in order to influence the atom preference selection that occurs
7472 def __init__(self, depgraph, root):
7473 dbapi.__init__(self)
7474 self._depgraph = depgraph
7476 self._match_cache = {}
7477 self._cpv_pkg_map = {}
7479 def _clear_cache(self):
7480 self._match_cache.clear()
7481 self._cpv_pkg_map.clear()
7483 def cp_list(self, cp):
7485 Emulate cp_list just so it can be used to check for existence
7486 of new-style virtuals. Since it's a waste of time to return
7487 more than one cpv for this use case, a maximum of one cpv will
7490 if isinstance(cp, Atom):
7495 for pkg in self._depgraph._iter_match_pkgs_any(
7496 self._depgraph._frozen_config.roots[self._root], atom):
7503 def match(self, atom):
7504 cache_key = (atom, atom.unevaluated_atom)
7505 ret = self._match_cache.get(cache_key)
7510 pkg, existing = self._depgraph._select_package(self._root, atom)
7512 if pkg is not None and self._visible(pkg):
7513 self._cpv_pkg_map[pkg.cpv] = pkg
7516 if pkg is not None and \
7517 atom.slot is None and \
7518 pkg.cp.startswith("virtual/") and \
7519 (("remove" not in self._depgraph._dynamic_config.myparams and
7520 "--update" not in self._depgraph._frozen_config.myopts) or
7522 not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
7523 # For new-style virtual lookahead that occurs inside dep_check()
7524 # for bug #141118, examine all slots. This is needed so that newer
7525 # slots will not unnecessarily be pulled in when a satisfying lower
7526 # slot is already installed. For example, if virtual/jdk-1.5 is
7527 # satisfied via gcj-jdk then there's no need to pull in a newer
7528 # slot to satisfy a virtual/jdk dependency, unless --update is
7532 for virt_pkg in self._depgraph._iter_match_pkgs_any(
7533 self._depgraph._frozen_config.roots[self._root], atom):
7534 if virt_pkg.cp != pkg.cp:
7536 slots.add(virt_pkg.slot)
7538 slots.remove(pkg.slot)
7540 slot_atom = atom.with_slot(slots.pop())
7541 pkg, existing = self._depgraph._select_package(
7542 self._root, slot_atom)
7545 if not self._visible(pkg):
7547 self._cpv_pkg_map[pkg.cpv] = pkg
7551 self._cpv_sort_ascending(ret)
7553 self._match_cache[cache_key] = ret
7556 def _visible(self, pkg):
7557 if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
7559 if pkg.installed and \
7560 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
7561 # Account for packages with masks (like KEYWORDS masks)
7562 # that are usually ignored in visibility checks for
7563 # installed packages, in order to handle cases like
7565 myopts = self._depgraph._frozen_config.myopts
7566 use_ebuild_visibility = myopts.get(
7567 '--use-ebuild-visibility', 'n') != 'n'
7568 avoid_update = "--update" not in myopts and \
7569 "remove" not in self._depgraph._dynamic_config.myparams
7570 usepkgonly = "--usepkgonly" in myopts
7571 if not avoid_update:
7572 if not use_ebuild_visibility and usepkgonly:
7574 elif not self._depgraph._equiv_ebuild_visible(pkg):
7577 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
7578 self._root].get(pkg.slot_atom)
7579 if in_graph is None:
7580 # Mask choices for packages which are not the highest visible
7581 # version within their slot (since they usually trigger slot
7583 highest_visible, in_graph = self._depgraph._select_package(
7584 self._root, pkg.slot_atom)
7585 # Note: highest_visible is not necessarily the real highest
7586 # visible, especially when --update is not enabled, so use
7587 # < operator instead of !=.
7588 if highest_visible is not None and pkg < highest_visible:
7590 elif in_graph != pkg:
7591 # Mask choices for packages that would trigger a slot
7592 # conflict with a previously selected package.
def aux_get(self, cpv, wants):
	"""Return metadata values for cpv, one per key in wants.

	Keys absent from the package's metadata yield an empty string.
	"""
	pkg_metadata = self._cpv_pkg_map[cpv]._metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
def match_pkgs(self, atom):
	"""Return the Package instances corresponding to the cpvs that
	match() yields for the given atom, preserving match() order."""
	pkg_map = self._cpv_pkg_map
	return [pkg_map[cpv] for cpv in self.match(atom)]
7603 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
7605 if "--quiet" in myopts:
7606 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7607 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
7608 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7609 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
7612 s = search(root_config, spinner, "--searchdesc" in myopts,
7613 "--quiet" not in myopts, "--usepkg" in myopts,
7614 "--usepkgonly" in myopts)
7615 null_cp = portage.dep_getkey(insert_category_into_atom(
7617 cat, atom_pn = portage.catsplit(null_cp)
7618 s.searchkey = atom_pn
7619 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7622 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7623 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
7625 def _spinner_start(spinner, myopts):
7628 if "--quiet" not in myopts and \
7629 ("--pretend" in myopts or "--ask" in myopts or \
7630 "--tree" in myopts or "--verbose" in myopts):
7632 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7634 elif "--buildpkgonly" in myopts:
7638 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
7639 if "--unordered-display" in myopts:
7640 portage.writemsg_stdout("\n" + \
7641 darkgreen("These are the packages that " + \
7642 "would be %s:" % action) + "\n\n")
7644 portage.writemsg_stdout("\n" + \
7645 darkgreen("These are the packages that " + \
7646 "would be %s, in reverse order:" % action) + "\n\n")
7648 portage.writemsg_stdout("\n" + \
7649 darkgreen("These are the packages that " + \
7650 "would be %s, in order:" % action) + "\n\n")
7652 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
7653 if not show_spinner:
7654 spinner.update = spinner.update_quiet
7657 portage.writemsg_stdout("Calculating dependencies ")
7659 def _spinner_stop(spinner):
7660 if spinner is None or \
7661 spinner.update == spinner.update_quiet:
7664 if spinner.update != spinner.update_basic:
7665 # update_basic is used for non-tty output,
7666 # so don't output backspaces in that case.
7667 portage.writemsg_stdout("\b\b")
7669 portage.writemsg_stdout("... done!\n")
7671 def backtrack_depgraph(settings, trees, myopts, myparams,
7672 myaction, myfiles, spinner):
7674 Raises PackageSetNotFound if myfiles contains a missing package set.
7676 _spinner_start(spinner, myopts)
7678 return _backtrack_depgraph(settings, trees, myopts, myparams,
7679 myaction, myfiles, spinner)
7681 _spinner_stop(spinner)
7684 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
7686 debug = "--debug" in myopts
7688 max_retries = myopts.get('--backtrack', 10)
7689 max_depth = max(1, (max_retries + 1) / 2)
7690 allow_backtracking = max_retries > 0
7691 backtracker = Backtracker(max_depth)
7694 frozen_config = _frozen_depgraph_config(settings, trees,
7699 if debug and mydepgraph is not None:
7701 "\n\nbacktracking try %s \n\n" % \
7702 backtracked, noiselevel=-1, level=logging.DEBUG)
7703 mydepgraph.display_problems()
7705 backtrack_parameters = backtracker.get()
7707 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7708 frozen_config=frozen_config,
7709 allow_backtracking=allow_backtracking,
7710 backtrack_parameters=backtrack_parameters)
7711 success, favorites = mydepgraph.select_files(myfiles)
7713 if success or mydepgraph.success_without_autounmask():
7715 elif not allow_backtracking:
7717 elif backtracked >= max_retries:
7719 elif mydepgraph.need_restart():
7721 backtracker.feedback(mydepgraph.get_backtrack_infos())
7725 if not (success or mydepgraph.success_without_autounmask()) and backtracked:
7729 "\n\nbacktracking aborted after %s tries\n\n" % \
7730 backtracked, noiselevel=-1, level=logging.DEBUG)
7731 mydepgraph.display_problems()
7733 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7734 frozen_config=frozen_config,
7735 allow_backtracking=False,
7736 backtrack_parameters=backtracker.get_best_run())
7737 success, favorites = mydepgraph.select_files(myfiles)
7739 if not success and mydepgraph.autounmask_breakage_detected():
7742 "\n\nautounmask breakage detected\n\n",
7743 noiselevel=-1, level=logging.DEBUG)
7744 mydepgraph.display_problems()
7745 myopts["--autounmask"] = "n"
7746 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7747 frozen_config=frozen_config, allow_backtracking=False)
7748 success, favorites = mydepgraph.select_files(myfiles)
7750 return (success, mydepgraph, favorites)
7753 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7755 Raises PackageSetNotFound if myfiles contains a missing package set.
7757 _spinner_start(spinner, myopts)
7759 return _resume_depgraph(settings, trees, mtimedb, myopts,
7762 _spinner_stop(spinner)
7764 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7766 Construct a depgraph for the given resume list. This will raise
7767 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
7768 TODO: Return reasons for dropped_tasks, for display/logging.
7770 @return: (success, depgraph, dropped_tasks)
7773 skip_unsatisfied = True
7774 mergelist = mtimedb["resume"]["mergelist"]
7776 frozen_config = _frozen_depgraph_config(settings, trees,
7779 mydepgraph = depgraph(settings, trees,
7780 myopts, myparams, spinner, frozen_config=frozen_config)
7782 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
7783 skip_masked=skip_masked)
7784 except depgraph.UnsatisfiedResumeDep as e:
7785 if not skip_unsatisfied:
7788 graph = mydepgraph._dynamic_config.digraph
7789 unsatisfied_parents = {}
7790 traversed_nodes = set()
7791 unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
7792 while unsatisfied_stack:
7793 pkg, atom = unsatisfied_stack.pop()
7794 if atom is not None and \
7795 mydepgraph._select_pkg_from_installed(
7796 pkg.root, atom)[0] is not None:
7798 atoms = unsatisfied_parents.get(pkg)
7801 unsatisfied_parents[pkg] = atoms
7802 if atom is not None:
7804 if pkg in traversed_nodes:
7806 traversed_nodes.add(pkg)
7808 # If this package was pulled in by a parent
7809 # package scheduled for merge, removing this
7810 # package may cause the parent package's
7811 # dependency to become unsatisfied.
7812 for parent_node, atom in \
7813 mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
7814 if not isinstance(parent_node, Package) \
7815 or parent_node.operation not in ("merge", "nomerge"):
7817 # We need to traverse all priorities here, in order to
7818 # ensure that a package with an unsatisfied dependency
7819 # won't get pulled in, even indirectly via a soft
7821 unsatisfied_stack.append((parent_node, atom))
7823 unsatisfied_tuples = frozenset(tuple(parent_node)
7824 for parent_node in unsatisfied_parents
7825 if isinstance(parent_node, Package))
7826 pruned_mergelist = []
7828 if isinstance(x, list) and \
7829 tuple(x) not in unsatisfied_tuples:
7830 pruned_mergelist.append(x)
7832 # If the mergelist doesn't shrink then this loop is infinite.
7833 if len(pruned_mergelist) == len(mergelist):
7834 # This happens if a package can't be dropped because
7835 # it's already installed, but it has unsatisfied PDEPEND.
7837 mergelist[:] = pruned_mergelist
7839 # Exclude installed packages that have been removed from the graph due
7840 # to failure to build/install runtime dependencies after the dependent
7841 # package has already been installed.
7842 dropped_tasks.update((pkg, atoms) for pkg, atoms in \
7843 unsatisfied_parents.items() if pkg.operation != "nomerge")
7845 del e, graph, traversed_nodes, \
7846 unsatisfied_parents, unsatisfied_stack
7850 return (success, mydepgraph, dropped_tasks)
7852 def get_mask_info(root_config, cpv, pkgsettings,
7853 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
7855 metadata = dict(zip(db_keys,
7856 db.aux_get(cpv, db_keys, myrepo=myrepo)))
7860 if metadata is None:
7861 mreasons = ["corruption"]
7863 eapi = metadata['EAPI']
7864 if not portage.eapi_is_supported(eapi):
7865 mreasons = ['EAPI %s' % eapi]
7867 pkg = Package(type_name=pkg_type, root_config=root_config,
7868 cpv=cpv, built=built, installed=installed, metadata=metadata)
7871 if _pkg_use_enabled is not None:
7872 modified_use = _pkg_use_enabled(pkg)
7874 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
7876 return metadata, mreasons
7878 def show_masked_packages(masked_packages):
7879 shown_licenses = set()
7880 shown_comments = set()
7881 # Maybe there is both an ebuild and a binary. Only
7882 # show one of them to avoid redundant appearance.
7884 have_eapi_mask = False
7885 for (root_config, pkgsettings, cpv, repo,
7886 metadata, mreasons) in masked_packages:
7889 output_cpv += _repo_separator + repo
7890 if output_cpv in shown_cpvs:
7892 shown_cpvs.add(output_cpv)
7893 eapi_masked = metadata is not None and \
7894 not portage.eapi_is_supported(metadata["EAPI"])
7896 have_eapi_mask = True
7897 # When masked by EAPI, metadata is mostly useless since
7898 # it doesn't contain essential things like SLOT.
7900 comment, filename = None, None
7901 if not eapi_masked and \
7902 "package.mask" in mreasons:
7903 comment, filename = \
7904 portage.getmaskingreason(
7905 cpv, metadata=metadata,
7906 settings=pkgsettings,
7907 portdb=root_config.trees["porttree"].dbapi,
7908 return_location=True)
7909 missing_licenses = []
7910 if not eapi_masked and metadata is not None:
7912 missing_licenses = \
7913 pkgsettings._getMissingLicenses(
7915 except portage.exception.InvalidDependString:
7916 # This will have already been reported
7917 # above via mreasons.
7920 writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
7923 if comment and comment not in shown_comments:
7924 writemsg(filename + ":\n" + comment + "\n",
7926 shown_comments.add(comment)
7927 portdb = root_config.trees["porttree"].dbapi
7928 for l in missing_licenses:
7929 if l in shown_licenses:
7931 l_path = portdb.findLicensePath(l)
7934 msg = ("A copy of the '%s' license" + \
7935 " is located at '%s'.\n\n") % (l, l_path)
7936 writemsg(msg, noiselevel=-1)
7937 shown_licenses.add(l)
7938 return have_eapi_mask
def show_mask_docs():
	"""Print a pointer to the documentation about masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES "
		"section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for doc_line in doc_lines:
		writemsg(doc_line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a link to the handbook section covering blocked packages."""
	doc_lines = (
		"\nFor more information about " + bad("Blocked Packages") +
			", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for doc_line in doc_lines:
		writemsg(doc_line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg, one per
	_MaskReason produced by _get_masking_status()."""
	messages = []
	for mreason in _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use):
		messages.append(mreason.message)
	return messages
7954 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7955 mreasons = _getmaskingstatus(
7956 pkg, settings=pkgsettings,
7957 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7959 if not pkg.installed:
7960 if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
7961 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7962 pkg._metadata["CHOST"]))
7965 for msgs in pkg.invalid.values():
7968 _MaskReason("invalid", "invalid: %s" % (msg,)))
7970 if not pkg._metadata["SLOT"]:
7972 _MaskReason("invalid", "SLOT: undefined"))