1 # Copyright 1999-2013 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function, unicode_literals
12 from collections import deque
13 from itertools import chain
16 from portage import os, OrderedDict
17 from portage import _unicode_decode, _unicode_encode, _encodings
18 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
19 from portage.dbapi import dbapi
20 from portage.dbapi.dep_expand import dep_expand
21 from portage.dbapi._similar_name_search import similar_name_search
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.dep._slot_operator import ignore_built_slot_operator_deps
26 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
28 from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
29 PackageNotFound, PortageException)
30 from portage.output import colorize, create_color_func, \
32 bad = create_color_func("BAD")
33 from portage.package.ebuild.config import _get_feature_flags
34 from portage.package.ebuild.getmaskingstatus import \
35 _getmaskingstatus, _MaskReason
36 from portage._sets import SETPREFIX
37 from portage._sets.base import InternalPackageSet
38 from portage.util import ConfigProtect, shlex_split, new_protect_filename
39 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
40 from portage.util import ensure_dirs
41 from portage.util import writemsg_level, write_atomic
42 from portage.util.digraph import digraph
43 from portage.util._async.TaskScheduler import TaskScheduler
44 from portage.util._eventloop.EventLoop import EventLoop
45 from portage.util._eventloop.global_event_loop import global_event_loop
46 from portage.versions import catpkgsplit
48 from _emerge.AtomArg import AtomArg
49 from _emerge.Blocker import Blocker
50 from _emerge.BlockerCache import BlockerCache
51 from _emerge.BlockerDepPriority import BlockerDepPriority
52 from _emerge.countdown import countdown
53 from _emerge.create_world_atom import create_world_atom
54 from _emerge.Dependency import Dependency
55 from _emerge.DependencyArg import DependencyArg
56 from _emerge.DepPriority import DepPriority
57 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
58 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
59 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
60 from _emerge.FakeVartree import FakeVartree
61 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
62 from _emerge.is_valid_package_atom import insert_category_into_atom, \
64 from _emerge.Package import Package
65 from _emerge.PackageArg import PackageArg
66 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
67 from _emerge.RootConfig import RootConfig
68 from _emerge.search import search
69 from _emerge.SetArg import SetArg
70 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
71 from _emerge.UnmergeDepPriority import UnmergeDepPriority
72 from _emerge.UseFlagDisplay import pkg_use_display
73 from _emerge.userquery import userquery
75 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
76 from _emerge.resolver.slot_collision import slot_conflict_handler
77 from _emerge.resolver.circular_dependency import circular_dependency_handler
78 from _emerge.resolver.output import Display
80 if sys.hexversion >= 0x3000000:
class _scheduler_graph_config(object):
	# Lightweight container passed to the Scheduler: the package cache,
	# the dependency graph, and the ordered merge list.
	# NOTE(review): this view of the source is missing lines here; the
	# assignments for the trees and graph arguments are not visible.
	def __init__(self, trees, pkg_cache, graph, mergelist):
		self.pkg_cache = pkg_cache
		self.mergelist = mergelist
def _wildcard_set(atoms):
	"""Build an InternalPackageSet of wildcard-enabled atoms.

	NOTE(review): this view of the source is missing lines here — the
	iteration/try statement and the final return are not visible; the
	except clause below implies an enclosing try block.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
			x = Atom(x, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Bare package names are retried with a category wildcard.
			x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
class _frozen_depgraph_config(object):
	"""Depgraph state that does not change across backtracking runs.

	NOTE(review): this view of the source is missing lines in several
	places below (e.g. the per-root loop header and the initializers
	for self.trees / self.roots / self._pkg_cache are not visible).
	"""

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		# EROOT of the merge target.
		self.target_root = settings["EROOT"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		# Keep a reference to the unmodified trees for later lookups.
		self._trees_orig = trees
		# All Package instances
		self._highest_license_masked = {}
		# --dynamic-deps defaults to enabled.
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		self.trees[myroot] = {}
		# Create a RootConfig instance that references
		# the FakeVartree instead of the real one.
		self.roots[myroot] = RootConfig(
			trees[myroot]["vartree"].settings,
			trees[myroot]["root_config"].setconfig)
		for tree in ("porttree", "bintree"):
			self.trees[myroot][tree] = trees[myroot][tree]
		# Substitute a FakeVartree so the resolver can model the vdb
		# state without touching the real installed-package database.
		self.trees[myroot]["vartree"] = \
			FakeVartree(trees[myroot]["root_config"],
				pkg_cache=self._pkg_cache,
				pkg_root_config=self.roots[myroot],
				dynamic_deps=dynamic_deps,
				ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
		self.pkgsettings[myroot] = portage.config(
			clone=self.trees[myroot]["vartree"].settings)

		self._required_set_names = set(["world"])

		# Each of the following options takes a list of atoms which is
		# joined and re-split, then parsed into a wildcard-capable set.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	# Per-root bookkeeping of package sets and atoms pulled into the graph.
	# NOTE(review): this view of the source is missing the __init__
	# header and the initial "self.sets = {}" assignment.
	# contains all sets added to the graph
	# contains non-set atoms given as arguments
	self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
	# contains all atoms from all sets added to the graph, including
	# atoms given as arguments
	self.atoms = InternalPackageSet(allow_repo=True)
	# Maps each atom back to the argument(s) it came from.
	self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks which packages must be rebuilt or reinstalled due to the
	--rebuild-if-* options, across backtracking runs."""

	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copy the backtracker's lists so they are not mutated in place.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		# Mirror the relevant --rebuild-if-* option flags.
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any rebuild trigger is active at all.
		self.rebuild = any((self.rebuild_if_new_rev,
			self.rebuild_if_new_ver, self.rebuild_if_unbuilt))
187 def add(self, dep_pkg, dep):
188 parent = dep.collapsed_parent
189 priority = dep.collapsed_priority
190 rebuild_exclude = self._frozen_config.rebuild_exclude
191 rebuild_ignore = self._frozen_config.rebuild_ignore
192 if (self.rebuild and isinstance(parent, Package) and
193 parent.built and priority.buildtime and
194 isinstance(dep_pkg, Package) and
195 not rebuild_exclude.findAtomForPackage(parent) and
196 not rebuild_ignore.findAtomForPackage(dep_pkg)):
197 self._graph.add(dep_pkg, parent, priority)
	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt.

		NOTE(review): this view of the source is missing lines — the
		bodies of the first two branches and the final return are not
		visible.
		"""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		# Strip the revision component for the comparison below.
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
	def _trigger_rebuild(self, parent, build_deps):
		# Decide whether "parent" must be rebuilt from source or
		# reinstalled from a binary, based on its buildtime deps.
		# NOTE(review): this view of the source is missing lines (early
		# returns, else branches, and the tail of the aux_get call), so
		# the control flow below is partial.
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
		trees = self._frozen_config.trees
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Accept any same-version (revision ignored) binary
					# of the dep as equivalent to dep_pkg.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)

				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					# NOTE(review): the aux_get argument list is
					# truncated in this view.
					bin_build_time, = bindb.aux_get(parent.cpv,
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						self.reinstall_list.add(root_slot)
	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		"""
		# NOTE(review): this view of the source is missing lines (the
		# graph/build_deps setup, the while-loop header and several
		# continue statements are not visible).
		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
			# We'll have to drop an edge. This should be quite rare.
			leaf_nodes.append(graph.order[-1])
			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				# Ignore a direct cycle.
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				# Parent becomes a leaf once all its children are gone.
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
class _dynamic_depgraph_config(object):
	"""Depgraph state that changes between backtracking runs.

	NOTE(review): this view of the source is missing lines in several
	places below (e.g. the initializers for self.sets, self.mydbapi,
	graph_tree, filtered_tree and dbs are not visible).
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		# contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This use used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		# This should be ordered such that the backtracker will
		# attempt to solve conflicts which occurred earlier first,
		# since an earlier conflict can be the cause of a conflict
		# which occurs later.
		self._slot_collision_info = OrderedDict()
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		# Carry over autounmask/backtracking decisions from the
		# previous run via backtrack_parameters.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)

			self.mydbapi[myroot] = fakedb

			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph

			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]

			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree

			if "--usepkgonly" not in depgraph._frozen_config.myopts:
				portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))

			if "--usepkg" in depgraph._frozen_config.myopts:
				bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))

			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
	"""Dependency graph resolver used by emerge."""

	pkg_tree_map = RootConfig.pkg_tree_map

	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
		# NOTE(review): BacktrackParameter() as a default argument is a
		# shared mutable default; this is safe only if instances are
		# treated as immutable — TODO confirm.
		# NOTE(review): the tail of the _frozen_depgraph_config(...)
		# call below is not visible in this view.
		if frozen_config is None:
			frozen_config = _frozen_depgraph_config(settings, trees,
		self._frozen_config = frozen_config
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, backtrack_parameters)
		self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

		# Package-selection strategy: always prefer the highest
		# available version.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available

		self._event_loop = (portage._internal_caller and
			global_event_loop() or EventLoop(main=False))
		"""
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.
		"""
		# NOTE(review): the def header for this method is not visible in
		# this view, and lines are missing below (the early return, the
		# fallback preload loop, and parts of the TaskScheduler setup).

		if self._dynamic_config._vdb_loaded:

		for myroot in self._frozen_config.trees:

			dynamic_deps = self._dynamic_config.myparams.get(
				"dynamic_deps", "y") != "n"
			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts

			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.

				# FakeVartree.sync() populates virtuals, and we want
				# self.pkgsettings to have them populated too.
				self._frozen_config.pkgsettings[myroot] = \
					portage.config(clone=fake_vartree.settings)

			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi
				fakedb = self._dynamic_config._graph_trees[
					myroot]["vartree"].dbapi

					fakedb.cpv_inject(pkg)

				max_jobs = self._frozen_config.myopts.get("--jobs")
				max_load = self._frozen_config.myopts.get("--load-average")
				scheduler = TaskScheduler(
					self._dynamic_deps_preload(fake_vartree, fakedb),
					event_loop=fake_vartree._portdb._event_loop)

		# Mark the vdb as loaded so subsequent calls are no-ops.
		self._dynamic_config._vdb_loaded = True
	def _dynamic_deps_preload(self, fake_vartree, fakedb):
		# Preload dynamic dependency metadata for each installed package,
		# injecting the packages into fakedb. When cached ebuild metadata
		# is unavailable, an EbuildMetadataPhase is constructed to compute
		# it; presumably it is yielded to the TaskScheduler set up by the
		# caller — the yield and the continue/else lines are not visible
		# in this view (source appears truncated).
		portdb = fake_vartree._portdb
		for pkg in fake_vartree.dbapi:
			self._spinner_update()
			fakedb.cpv_inject(pkg)
			ebuild_path, repo_path = \
				portdb.findname2(pkg.cpv, myrepo=pkg.repo)
			if ebuild_path is None:
				# No ebuild available; preload with no metadata.
				fake_vartree.dynamic_deps_preload(pkg, None)

			metadata, ebuild_hash = portdb._pull_valid_cache(
				pkg.cpv, ebuild_path, repo_path)
			if metadata is not None:
				fake_vartree.dynamic_deps_preload(pkg, metadata)

			proc = EbuildMetadataPhase(cpv=pkg.cpv,
				ebuild_hash=ebuild_hash,
				portdb=portdb, repo_path=repo_path,
				settings=portdb.doebuild_settings)

			proc.addExitListener(
				self._dynamic_deps_proc_exit(pkg, fake_vartree))
	class _dynamic_deps_proc_exit(object):
		"""Exit listener for EbuildMetadataPhase: on success, feeds the
		computed metadata back into the FakeVartree."""

		__slots__ = ('_pkg', '_fake_vartree')

		def __init__(self, pkg, fake_vartree):
			# NOTE(review): the "self._pkg = pkg" assignment is not
			# visible in this view (source appears truncated).
			self._fake_vartree = fake_vartree

		def __call__(self, proc):
			# Only propagate metadata if the phase exited cleanly.
			if proc.returncode == os.EX_OK:
				metadata = proc.metadata
				self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
614 def _spinner_update(self):
615 if self._frozen_config.spinner:
616 self._frozen_config.spinner.update()
	def _show_ignored_binaries(self):
		"""
		Show binaries that have been ignored because their USE didn't
		match the user's config.
		"""
		# NOTE(review): this view of the source is missing lines (early
		# returns, continue statements, the flag_display initializer and
		# the msg list wrapper are not visible).
		if not self._dynamic_config.ignored_binaries \
			or '--quiet' in self._frozen_config.myopts \
			or self._dynamic_config.myparams.get(
			"binpkg_respect_use") in ("y", "n"):

		for pkg in list(self._dynamic_config.ignored_binaries):

			selected_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)

			selected_pkg = selected_pkg[-1]
			if selected_pkg > pkg:
				# A newer package was chosen anyway; drop the notice.
				self._dynamic_config.ignored_binaries.pop(pkg)

			if selected_pkg.installed and \
				selected_pkg.cpv == pkg.cpv and \
				selected_pkg.build_time == pkg.build_time:
				# We don't care about ignored binaries when an
				# identical installed instance is selected to
				self._dynamic_config.ignored_binaries.pop(pkg)

		if not self._dynamic_config.ignored_binaries:

		self._show_merge_list()

		writemsg("\n!!! The following binary packages have been ignored " + \
			"due to non matching USE:\n\n", noiselevel=-1)

		for pkg, flags in self._dynamic_config.ignored_binaries.items():
			for flag in sorted(flags):
				if flag not in pkg.use.enabled:
					flag_display.append(flag)
			flag_display = " ".join(flag_display)
			# The user can paste this line into package.use
			writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

			"NOTE: The --binpkg-respect-use=n option will prevent emerge",
			" from ignoring these binary packages if possible.",
			" Using --binpkg-respect-use=y will silence this warning."

			line = colorize("INFORM", line)
			writemsg(line + "\n", noiselevel=-1)
	def _get_missed_updates(self):
		# Collect updates that were masked at runtime, keyed by
		# (root, slot_atom). NOTE(review): this view of the source is
		# missing lines (the missed_updates dict initializer and several
		# continue statements are not visible).

		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
			# Exclude installed here since we only
			# want to show available updates.
			chosen_pkg = self._dynamic_config.mydbapi[pkg.root
				].match_pkgs(pkg.slot_atom)
			if not chosen_pkg or chosen_pkg[-1] >= pkg:
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
			for mask_type, parent_atoms in mask_reasons.items():
				missed_updates[k] = (pkg, mask_type, parent_atoms)

		return missed_updates
	def _show_missed_update(self):
		# Report skipped updates, grouped by the type of mask that
		# prevented them. NOTE(review): the early return for the empty
		# case is not visible in this view.

		missed_updates = self._get_missed_updates()

		if not missed_updates:

		missed_update_types = {}
		for pkg, mask_type, parent_atoms in missed_updates.values():
			missed_update_types.setdefault(mask_type,
				[]).append((pkg, parent_atoms))

		# In quiet mode (without --debug), suppress the noisier
		# categories entirely.
		if '--quiet' in self._frozen_config.myopts and \
			'--debug' not in self._frozen_config.myopts:
			missed_update_types.pop("slot conflict", None)
			missed_update_types.pop("missing dependency", None)

		self._show_missed_update_slot_conflicts(
			missed_update_types.get("slot conflict"))

		self._show_missed_update_unsatisfied_dep(
			missed_update_types.get("missing dependency"))
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		# Display updates skipped due to unsatisfied dependencies.
		# NOTE(review): this view of the source is missing lines (the
		# early return and the try: header implied by the except clause
		# below are not visible).

		if not missed_updates:

		self._show_merge_list()
		backtrack_masked = []

		for pkg, parent_atoms in missed_updates:

				for parent, root, atom in parent_atoms:
					self._show_unsatisfied_dep(root, atom, myparent=parent,
						check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))

			writemsg("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n", noiselevel=-1)

			writemsg(str(pkg.slot_atom), noiselevel=-1)
			if pkg.root_config.settings["ROOT"] != "/":
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
				writemsg("\n", noiselevel=-1)

			# These are shown in abbreviated form, in order to avoid terminal
			# flooding from mask messages as reported in bug #285832.
			writemsg("\n!!! The following update(s) have been skipped " + \
				"due to unsatisfied dependencies\n" + \
				"!!! triggered by backtracking:\n\n", noiselevel=-1)
			for pkg, parent_atoms in backtrack_masked:
				writemsg(str(pkg.slot_atom), noiselevel=-1)
				if pkg.root_config.settings["ROOT"] != "/":
					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
				writemsg("\n", noiselevel=-1)
	def _show_missed_update_slot_conflicts(self, missed_updates):
		# Display updates skipped due to slot/dependency conflicts.
		# NOTE(review): this view of the source is missing lines (the
		# early return and the msg list initializer are not visible).

		if not missed_updates:

		self._show_merge_list()
		msg.append("\nWARNING: One or more updates have been " + \
			"skipped due to a dependency conflict:\n\n")

		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
			if pkg.root_config.settings["ROOT"] != "/":
				msg.append(" for %s" % (pkg.root,))

			for parent, atom in parent_atoms:
				msg.append(" conflicts with\n")

				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))

		writemsg("".join(msg), noiselevel=-1)
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		"""
		# NOTE(review): this view of the source is missing lines (early
		# returns and the msg list initializers are not visible).

		if not self._dynamic_config._slot_collision_info:

		self._show_merge_list()

		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
		handler = self._dynamic_config._slot_conflict_handler

		conflict = handler.get_conflict()
		writemsg(conflict, noiselevel=-1)

		explanation = handler.get_explanation()
			writemsg(explanation, noiselevel=-1)

		if "--quiet" in self._frozen_config.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		# Suggest a larger --backtrack value when backtracking is off or
		# configured below the suggested threshold of 30.
		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
		if not self._dynamic_config._allow_backtracking and \
			(backtrack_opt is None or \
			(backtrack_opt > 0 and backtrack_opt < 30)):
			msg.append(" You may want to try a larger value of the ")
			msg.append("--backtrack option, such as --backtrack=30, ")
			msg.append("in order to see if that will solve this conflict ")
			msg.append("automatically.")

		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)
872 def _process_slot_conflicts(self):
874 If there are any slot conflicts and backtracking is enabled,
875 _complete_graph should complete the graph before this method
876 is called, so that all relevant reverse dependencies are
877 available for use in backtracking decisions.
879 for (slot_atom, root), slot_nodes in \
880 self._dynamic_config._slot_collision_info.items():
881 self._process_slot_conflict(root, slot_atom, slot_nodes)
	def _process_slot_conflict(self, root, slot_atom, slot_nodes):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		# NOTE(review): this view of the source is missing lines (e.g.
		# the writemsg_level(...) call header around the backtracking-
		# loop message, several continue statements, an else: branch,
		# and the conflict_atoms/conflict_pkgs/remaining initializers
		# are not visible).

		debug = "--debug" in self._frozen_config.myopts

		# Union of all parent atoms across the conflicting packages.
		slot_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			slot_parent_atoms.update(parent_atoms)

		for pkg in slot_nodes:

			if self._dynamic_config._allow_backtracking and \
				pkg in self._dynamic_config._runtime_pkg_mask:
					"!!! backtracking loop detected: %s %s\n" % \
					self._dynamic_config._runtime_pkg_mask[pkg]),
					level=logging.DEBUG, noiselevel=-1)

			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if parent_atoms is None:
				self._dynamic_config._parent_atoms[pkg] = parent_atoms

			for parent_atom in slot_parent_atoms:
				if parent_atom in parent_atoms:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,), allow_repo=True)
				if atom_set.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):
					parent_atoms.add(parent_atom)
					conflict_atoms.setdefault(parent_atom, set()).add(pkg)

				conflict_pkgs.append(pkg)

		if conflict_pkgs and \
			self._dynamic_config._allow_backtracking and \
			not self._accept_blocker_conflicts():
			for pkg in conflict_pkgs:
				if self._slot_conflict_backtrack_abi(pkg,
					slot_nodes, conflict_atoms):
					backtrack_infos = self._dynamic_config._backtrack_infos
					config = backtrack_infos.setdefault("config", {})
					config.setdefault("slot_conflict_abi", set()).add(pkg)
					remaining.append(pkg)
			self._slot_confict_backtrack(root, slot_atom,
				slot_parent_atoms, remaining)
# NOTE(review): method name carries a historical typo ("confict"); it is
# called with the same spelling above, so it must not be renamed here.
952 def _slot_confict_backtrack(self, root, slot_atom,
953 all_parents, conflict_pkgs):
# Build per-package mask candidates ("backtrack_data") and schedule a
# backtracking restart that masks the chosen package. Fragmentary
# capture: some initializer lines are missing from this dump.
955 debug = "--debug" in self._frozen_config.myopts
956 existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
958 # The ordering of backtrack_data can make
959 # a difference here, because both mask actions may lead
960 # to valid, but different, solutions and the one with
961 # 'existing_node' masked is usually the better one. Because
962 # of that, we choose an order such that
963 # the backtracker will first explore the choice with
964 # existing_node masked. The backtracker reverses the
965 # order, so the order it uses is the reverse of the
966 # order shown here. See bug #339606.
967 if existing_node in conflict_pkgs and \
968 existing_node is not conflict_pkgs[-1]:
969 conflict_pkgs.remove(existing_node)
970 conflict_pkgs.append(existing_node)
971 for to_be_masked in conflict_pkgs:
972 # For missed update messages, find out which
973 # atoms matched to_be_selected that did not
974 # match to_be_masked.
976 self._dynamic_config._parent_atoms.get(to_be_masked, set())
977 conflict_atoms = set(parent_atom for parent_atom in all_parents \
978 if parent_atom not in parent_atoms)
979 backtrack_data.append((to_be_masked, conflict_atoms))
981 if len(backtrack_data) > 1:
982 # NOTE: Generally, we prefer to mask the higher
983 # version since this solves common cases in which a
984 # lower version is needed so that all dependencies
985 # will be satisfied (bug #337178). However, if
986 # existing_node happens to be installed then we
987 # mask that since this is a common case that is
988 # triggered when --update is not enabled.
989 if existing_node.installed:
991 elif any(pkg > existing_node for pkg in conflict_pkgs):
992 backtrack_data.reverse()
# The last entry is what the backtracker (which reverses the list) will
# try first.
994 to_be_masked = backtrack_data[-1][0]
996 self._dynamic_config._backtrack_infos.setdefault(
997 "slot conflict", []).append(backtrack_data)
998 self._dynamic_config._need_restart = True
1003 msg.append("backtracking due to slot conflict:")
1004 msg.append(" first package: %s" % existing_node)
1005 msg.append(" package to mask: %s" % to_be_masked)
1006 msg.append(" slot: %s" % slot_atom)
1007 msg.append(" parents: %s" % ", ".join( \
1008 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
1010 writemsg_level("".join("%s\n" % l for l in msg),
1011 noiselevel=-1, level=logging.DEBUG)
1013 def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
# NOTE(review): fragmentary capture — continue statements, docstring
# delimiters and the final return are missing from this dump.
1015 If one or more conflict atoms have a slot/sub-slot dep that can be resolved
1016 by rebuilding the parent package, then schedule the rebuild via
1017 backtracking, and return True. Otherwise, return False.
1020 found_update = False
1021 for parent_atom, conflict_pkgs in conflict_atoms.items():
1022 parent, atom = parent_atom
# Only built parents with a := slot-operator dep can be rebuilt here.
1023 if atom.slot_operator != "=" or not parent.built:
1026 if pkg not in conflict_pkgs:
1029 for other_pkg in slot_nodes:
1030 if other_pkg in conflict_pkgs:
1033 dep = Dependency(atom=atom, child=other_pkg,
1034 parent=parent, root=pkg.root)
1037 self._slot_operator_update_probe_slot_conflict(dep)
1038 if new_dep is not None:
1039 self._slot_operator_update_backtrack(dep,
1045 def _slot_change_probe(self, dep):
# Fragmentary capture — return False/None paths and the try/except
# body boundaries are missing from this dump.
1048 @return: True if dep.child should be rebuilt due to a change
1049 in sub-slot (without revbump, as in bug #456208).
# Only relevant when the parent is an unbuilt ebuild and the child is
# a built package.
1051 if not (isinstance(dep.parent, Package) and \
1052 not dep.parent.built and dep.child.built):
1055 root_config = self._frozen_config.roots[dep.root]
1058 matches.append(self._pkg(dep.child.cpv, "ebuild",
1059 root_config, myrepo=dep.child.repo))
1060 except PackageNotFound:
# Search ebuilds of the exact same cpv; a candidate with identical
# slot/sub-slot means no change happened.
1063 for unbuilt_child in chain(matches,
1064 self._iter_match_pkgs(root_config, "ebuild",
1065 Atom("=%s" % (dep.child.cpv,)))):
1066 if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
1068 if self._frozen_config.excluded_pkgs.findAtomForPackage(
1070 modified_use=self._pkg_use_enabled(unbuilt_child)):
1072 if not self._pkg_visibility_check(unbuilt_child):
1078 if unbuilt_child.slot == dep.child.slot and \
1079 unbuilt_child.sub_slot == dep.child.sub_slot:
1082 return unbuilt_child
1084 def _slot_change_backtrack(self, dep, new_child_slot):
# Schedule a backtracking restart because dep.child's slot/sub-slot
# changed without a revision bump (see _slot_change_probe). Fragmentary
# capture: child assignment and set initializers are missing.
1086 if "--debug" in self._frozen_config.myopts:
1090 msg.append("backtracking due to slot/sub-slot change:")
1091 msg.append(" child package: %s" % child)
1092 msg.append(" child slot: %s/%s" %
1093 (child.slot, child.sub_slot))
1094 msg.append(" new child: %s" % new_child_slot)
1095 msg.append(" new child slot: %s/%s" %
1096 (new_child_slot.slot, new_child_slot.sub_slot))
1097 msg.append(" parent package: %s" % dep.parent)
1098 msg.append(" atom: %s" % dep.atom)
1100 writemsg_level("\n".join(msg),
1101 noiselevel=-1, level=logging.DEBUG)
1102 backtrack_infos = self._dynamic_config._backtrack_infos
1103 config = backtrack_infos.setdefault("config", {})
1105 # mask unwanted binary packages if necessary
1107 if not child.installed:
1108 masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
1110 config.setdefault("slot_operator_mask_built", {}).update(masks)
1112 # trigger replacement of installed packages if necessary
1115 replacement_atom = self._replace_installed_atom(child)
1116 if replacement_atom is not None:
1117 reinstalls.add((child.root, replacement_atom))
1119 config.setdefault("slot_operator_replace_installed",
1120 set()).update(reinstalls)
# Force the resolver to restart so the scheduled masks/replacements
# take effect on the next pass.
1122 self._dynamic_config._need_restart = True
1124 def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
# Schedule a backtracking restart to pick up a missed slot-operator
# (ABI) update. Fragmentary capture: the new_dep parameter line and
# child assignment are missing from this dump.
1126 if new_child_slot is None:
1129 child = new_child_slot
1130 if "--debug" in self._frozen_config.myopts:
1134 msg.append("backtracking due to missed slot abi update:")
1135 msg.append(" child package: %s" % child)
1136 if new_child_slot is not None:
1137 msg.append(" new child slot package: %s" % new_child_slot)
1138 msg.append(" parent package: %s" % dep.parent)
1139 if new_dep is not None:
1140 msg.append(" new parent pkg: %s" % new_dep.parent)
1141 msg.append(" atom: %s" % dep.atom)
1143 writemsg_level("\n".join(msg),
1144 noiselevel=-1, level=logging.DEBUG)
1145 backtrack_infos = self._dynamic_config._backtrack_infos
1146 config = backtrack_infos.setdefault("config", {})
1148 # mask unwanted binary packages if necessary
1150 if new_child_slot is None:
1151 if not child.installed:
1152 abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
1153 if not dep.parent.installed:
1154 abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
1156 config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
1158 # trigger replacement of installed packages if necessary
1159 abi_reinstalls = set()
1160 if dep.parent.installed:
1161 if new_dep is not None:
1162 replacement_atom = new_dep.parent.slot_atom
1164 replacement_atom = self._replace_installed_atom(dep.parent)
1165 if replacement_atom is not None:
1166 abi_reinstalls.add((dep.parent.root, replacement_atom))
1167 if new_child_slot is None and child.installed:
1168 replacement_atom = self._replace_installed_atom(child)
1169 if replacement_atom is not None:
1170 abi_reinstalls.add((child.root, replacement_atom))
1172 config.setdefault("slot_operator_replace_installed",
1173 set()).update(abi_reinstalls)
1175 self._dynamic_config._need_restart = True
1177 def _slot_operator_update_probe_slot_conflict(self, dep):
# Probe for a slot-operator update in the slot-conflict case; when
# autounmask is enabled, retry the probe at each autounmask level.
# Fragmentary capture: return statements are missing from this dump.
1178 new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
1180 if new_dep is not None:
1183 if self._dynamic_config._autounmask is True:
1185 for autounmask_level in self._autounmask_levels():
1187 new_dep = self._slot_operator_update_probe(dep,
1188 slot_conflict=True, autounmask_level=autounmask_level)
1190 if new_dep is not None:
1195 def _slot_operator_update_probe(self, dep, new_child_slot=False,
1196 slot_conflict=False, autounmask_level=None):
# Fragmentary capture — docstring delimiters, continue/break/return
# lines and several initializers are missing from this dump.
1198 slot/sub-slot := operators tend to prevent updates from getting pulled in,
1199 since installed packages pull in packages with the slot/sub-slot that they
1200 were built against. Detect this case so that we can schedule rebuilds
1201 and reinstalls when appropriate.
1202 NOTE: This function only searches for updates that involve upgrades
1203 to higher versions, since the logic required to detect when a
1204 downgrade would be desirable is not implemented.
# Respect --exclude for both ends of the dependency edge.
1207 if dep.child.installed and \
1208 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
1209 modified_use=self._pkg_use_enabled(dep.child)):
1212 if dep.parent.installed and \
1213 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1214 modified_use=self._pkg_use_enabled(dep.parent)):
1217 debug = "--debug" in self._frozen_config.myopts
1218 selective = "selective" in self._dynamic_config.myparams
1219 want_downgrade = None
1221 for replacement_parent in self._iter_similar_available(dep.parent,
1222 dep.parent.slot_atom, autounmask_level=autounmask_level):
1224 selected_atoms = None
# Scan the replacement parent's dependency atoms for a := dep on the
# same package category/name as the original atom.
1226 for atom in replacement_parent.validated_atoms:
1227 if not atom.slot_operator == "=" or \
1229 atom.cp != dep.atom.cp:
1232 # Discard USE deps, we're only searching for an approximate
1233 # pattern, and dealing with USE states is too complex for
1235 unevaluated_atom = atom.unevaluated_atom
1236 atom = atom.without_use
1238 if replacement_parent.built and \
1239 portage.dep._match_slot(atom, dep.child):
1240 # Our selected replacement_parent appears to be built
1241 # for the existing child selection. So, discard this
1242 # parent and search for another.
1245 for pkg in self._iter_similar_available(
1247 if pkg.slot == dep.child.slot and \
1248 pkg.sub_slot == dep.child.sub_slot:
1249 # If slot/sub-slot is identical, then there's
1250 # no point in updating.
1253 if pkg.slot == dep.child.slot:
1256 # the new slot only matters if the
1257 # package version is higher
1260 if pkg.slot != dep.child.slot:
1263 if want_downgrade is None:
1264 want_downgrade = self._downgrade_probe(dep.child)
1265 # be careful not to trigger a rebuild when
1266 # the only version available with a
1267 # different slot_operator is an older version
1268 if not want_downgrade:
1271 insignificant = False
1272 if not slot_conflict and \
1274 dep.parent.installed and \
1275 dep.child.installed and \
1276 dep.parent.cpv == replacement_parent.cpv and \
1277 dep.child.cpv == pkg.cpv:
1278 # Then can happen if the child's sub-slot changed
1279 # without a revision bump. The sub-slot change is
1280 # considered insignificant until one of its parent
1281 # packages needs to be rebuilt (which may trigger a
1283 insignificant = True
1285 if not insignificant:
1286 # Evaluate USE conditionals and || deps, in order
1287 # to see if this atom is really desirable, since
1288 # otherwise we may trigger an undesirable rebuild
1289 # as in bug #460304.
1290 if selected_atoms is None:
1291 selected_atoms = self._select_atoms_probe(
1292 dep.child.root, replacement_parent)
1293 if unevaluated_atom not in selected_atoms:
# Debug trace for the successful probe.
1300 msg.append("slot_operator_update_probe:")
1301 msg.append(" existing child package: %s" % dep.child)
1302 msg.append(" existing parent package: %s" % dep.parent)
1303 msg.append(" new child package: %s" % pkg)
1304 msg.append(" new parent package: %s" % replacement_parent)
1306 msg.append("insignificant changes detected")
1308 writemsg_level("\n".join(msg),
1309 noiselevel=-1, level=logging.DEBUG)
1314 return Dependency(parent=replacement_parent,
1315 child=pkg, atom=unevaluated_atom)
# Debug trace for the unsuccessful probe (no update found).
1321 msg.append("slot_operator_update_probe:")
1322 msg.append(" existing child package: %s" % dep.child)
1323 msg.append(" existing parent package: %s" % dep.parent)
1324 msg.append(" new child package: %s" % None)
1325 msg.append(" new parent package: %s" % None)
1327 writemsg_level("\n".join(msg),
1328 noiselevel=-1, level=logging.DEBUG)
1332 def _slot_operator_unsatisfied_probe(self, dep):
# Probe whether an unsatisfied built := dep could be satisfied by a
# similar replacement parent. Fragmentary capture: docstring,
# continue/return lines are missing from this dump.
1334 if dep.parent.installed and \
1335 self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
1336 modified_use=self._pkg_use_enabled(dep.parent)):
1339 debug = "--debug" in self._frozen_config.myopts
1341 for replacement_parent in self._iter_similar_available(dep.parent,
1342 dep.parent.slot_atom):
1344 for atom in replacement_parent.validated_atoms:
1345 if not atom.slot_operator == "=" or \
1347 atom.cp != dep.atom.cp:
1350 # Discard USE deps, we're only searching for an approximate
1351 # pattern, and dealing with USE states is too complex for
1353 atom = atom.without_use
1355 pkg, existing_node = self._select_package(dep.root, atom,
1356 onlydeps=dep.onlydeps)
# Debug trace when a replacement parent/child pair is found.
1364 msg.append("slot_operator_unsatisfied_probe:")
1365 msg.append(" existing parent package: %s" % dep.parent)
1366 msg.append(" existing parent atom: %s" % dep.atom)
1367 msg.append(" new parent package: %s" % replacement_parent)
1368 msg.append(" new child package: %s" % pkg)
1370 writemsg_level("\n".join(msg),
1371 noiselevel=-1, level=logging.DEBUG)
# Debug trace when no replacement exists.
1379 msg.append("slot_operator_unsatisfied_probe:")
1380 msg.append(" existing parent package: %s" % dep.parent)
1381 msg.append(" existing parent atom: %s" % dep.atom)
1382 msg.append(" new parent package: %s" % None)
1383 msg.append(" new child package: %s" % None)
1385 writemsg_level("\n".join(msg),
1386 noiselevel=-1, level=logging.DEBUG)
1390 def _slot_operator_unsatisfied_backtrack(self, dep):
# Schedule backtracking for an unsatisfied built slot-operator dep:
# mask the built parent and/or replace the installed parent.
# Fragmentary capture: parent assignment and set initializers missing.
1394 if "--debug" in self._frozen_config.myopts:
1398 msg.append("backtracking due to unsatisfied "
1399 "built slot-operator dep:")
1400 msg.append(" parent package: %s" % parent)
1401 msg.append(" atom: %s" % dep.atom)
1403 writemsg_level("\n".join(msg),
1404 noiselevel=-1, level=logging.DEBUG)
1406 backtrack_infos = self._dynamic_config._backtrack_infos
1407 config = backtrack_infos.setdefault("config", {})
1409 # mask unwanted binary packages if necessary
1411 if not parent.installed:
1412 masks.setdefault(parent, {})["slot_operator_mask_built"] = None
1414 config.setdefault("slot_operator_mask_built", {}).update(masks)
1416 # trigger replacement of installed packages if necessary
1418 if parent.installed:
1419 replacement_atom = self._replace_installed_atom(parent)
1420 if replacement_atom is not None:
1421 reinstalls.add((parent.root, replacement_atom))
1423 config.setdefault("slot_operator_replace_installed",
1424 set()).update(reinstalls)
1426 self._dynamic_config._need_restart = True
1428 def _downgrade_probe(self, pkg):
# Fragmentary capture — docstring delimiters and the early
# `return False` for the >= case are missing from this dump.
1430 Detect cases where a downgrade of the given package is considered
1431 desirable due to the current version being masked or unavailable.
1433 available_pkg = None
1434 for available_pkg in self._iter_similar_available(pkg,
1436 if available_pkg >= pkg:
1437 # There's an available package of the same or higher
1438 # version, so downgrade seems undesirable.
# True only when at least one similar package exists and all of them
# are strictly older than pkg.
1441 return available_pkg is not None
1443 def _select_atoms_probe(self, root, pkg):
# Evaluate pkg's dependency strings with its enabled USE flags and
# return the frozenset of unevaluated atoms that would actually be
# selected. Fragmentary capture: the selected_atoms initializer and
# empty-value guard lines are missing from this dump.
1445 use = self._pkg_use_enabled(pkg)
1446 for k in pkg._dep_keys:
1447 v = pkg._metadata.get(k)
1450 selected_atoms.extend(self._select_atoms(
1451 root, v, myuse=use, parent=pkg)[pkg])
1452 return frozenset(x.unevaluated_atom for
1453 x in selected_atoms)
1455 def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
# Generator filtering candidate packages. Fragmentary capture: the
# continue statements and the final `yield pkg` are missing from this
# dump.
1457 Given a package that's in the graph, do a rough check to
1458 see if a similar package is available to install. The given
1459 graph_pkg itself may be yielded only if it's not installed.
1462 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
1463 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
1464 use_ebuild_visibility = self._frozen_config.myopts.get(
1465 '--use-ebuild-visibility', 'n') != 'n'
1467 for pkg in self._iter_match_pkgs_any(
1468 graph_pkg.root_config, atom):
1469 if pkg.cp != graph_pkg.cp:
1470 # discard old-style virtual match
1474 if pkg in self._dynamic_config._runtime_pkg_mask:
1476 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
1477 modified_use=self._pkg_use_enabled(pkg)):
1480 if self._equiv_binary_installed(pkg):
# Skip candidates whose equivalent ebuild is not visible, unless the
# usepkgonly/useoldpkg configuration makes ebuild visibility moot.
1482 if not (not use_ebuild_visibility and
1483 (usepkgonly or useoldpkg_atoms.findAtomForPackage(
1484 pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
1485 not self._equiv_ebuild_visible(pkg,
1486 autounmask_level=autounmask_level):
1488 if not self._pkg_visibility_check(pkg,
1489 autounmask_level=autounmask_level):
1493 def _replace_installed_atom(self, inst_pkg):
# Fragmentary capture — docstring delimiters, `if pkg.installed:`
# guards, built_pkgs initializer and best_version assignment are
# missing from this dump.
1495 Given an installed package, generate an atom suitable for
1496 slot_operator_replace_installed backtracking info. The replacement
1497 SLOT may differ from the installed SLOT, so first search by cpv.
# First pass: exact-cpv candidates.
1500 for pkg in self._iter_similar_available(inst_pkg,
1501 Atom("=%s" % inst_pkg.cpv)):
1503 return pkg.slot_atom
1504 elif not pkg.installed:
1505 # avoid using SLOT from a built instance
1506 built_pkgs.append(pkg)
# Second pass: same-slot candidates.
1508 for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
1510 return pkg.slot_atom
1511 elif not pkg.installed:
1512 # avoid using SLOT from a built instance
1513 built_pkgs.append(pkg)
# Fall back to the highest-version built candidate, if any.
1517 for pkg in built_pkgs:
1518 if best_version is None or pkg > best_version:
1520 return best_version.slot_atom
1524 def _slot_operator_trigger_reinstalls(self):
# Fragmentary capture — docstring delimiters, atom assignment and
# continue statements are missing from this dump.
1526 Search for packages with slot-operator deps on older slots, and schedule
1527 rebuilds if they can link to a newer slot that's in the graph.
1530 rebuild_if_new_slot = self._dynamic_config.myparams.get(
1531 "rebuild_if_new_slot", "y") == "y"
1533 for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
1535 for dep in slot_info:
1538 if atom.slot_operator is None:
# Unbuilt slot-operator dep: only a slot/sub-slot change (without
# revbump) can require a rebuild here.
1541 if not atom.slot_operator_built:
1542 new_child_slot = self._slot_change_probe(dep)
1543 if new_child_slot is not None:
1544 self._slot_change_backtrack(dep, new_child_slot)
1547 if not (dep.parent and
1548 isinstance(dep.parent, Package) and dep.parent.built):
1551 # Check for slot update first, since we don't want to
1552 # trigger reinstall of the child package when a newer
1553 # slot will be used instead.
1554 if rebuild_if_new_slot:
1555 new_dep = self._slot_operator_update_probe(dep,
1556 new_child_slot=True)
1557 if new_dep is not None:
1558 self._slot_operator_update_backtrack(dep,
1559 new_child_slot=new_dep.child)
1563 if self._slot_operator_update_probe(dep):
1564 self._slot_operator_update_backtrack(dep)
1567 def _reinstall_for_flags(self, pkg, forced_flags,
1568 orig_use, orig_iuse, cur_use, cur_iuse):
1569 """Return a set of flags that trigger reinstallation, or None if there
1570 are no such flags."""
# NOTE(review): fragmentary capture — the `return flags` / `return None`
# lines are missing from this dump.
1572 # binpkg_respect_use: Behave like newuse by default. If newuse is
1573 # False and changed_use is True, then behave like changed_use.
1574 binpkg_respect_use = (pkg.built and
1575 self._dynamic_config.myparams.get("binpkg_respect_use")
1577 newuse = "--newuse" in self._frozen_config.myopts
1578 changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
# FEATURES-driven flags are excluded from the comparison below.
1579 feature_flags = _get_feature_flags(
1580 _get_eapi_attrs(pkg.eapi))
1582 if newuse or (binpkg_respect_use and not changed_use):
# --newuse semantics: any IUSE change or enabled-flag change counts.
1583 flags = set(orig_iuse.symmetric_difference(
1584 cur_iuse).difference(forced_flags))
1585 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
1586 cur_iuse.intersection(cur_use)))
1587 flags.difference_update(feature_flags)
1591 elif changed_use or binpkg_respect_use:
# changed-use semantics: only enabled-flag changes count.
1592 flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
1593 cur_iuse.intersection(cur_use)))
1594 flags.difference_update(feature_flags)
1599 def _create_graph(self, allow_unsatisfied=False):
# Main dependency-graph construction loop: drain the dep stack and the
# disjunctive (|| dep) stack. Fragmentary capture: return statements
# and else branches are missing from this dump.
1600 dep_stack = self._dynamic_config._dep_stack
1601 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
1602 while dep_stack or dep_disjunctive_stack:
1603 self._spinner_update()
1605 dep = dep_stack.pop()
# A Package entry means "expand this package's deps"; anything else is
# a pending Dependency to resolve.
1606 if isinstance(dep, Package):
1607 if not self._add_pkg_deps(dep,
1608 allow_unsatisfied=allow_unsatisfied):
1611 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
1613 if dep_disjunctive_stack:
1614 if not self._pop_disjunction(allow_unsatisfied):
1618 def _expand_set_args(self, input_args, add_to_digraph=False):
# Fragmentary capture — docstring delimiters, `yield arg` lines and the
# arg_stack initializer are missing from this dump.
1620 Iterate over a list of DependencyArg instances and yield all
1621 instances given in the input together with additional SetArg
1622 instances that are generated from nested sets.
1623 @param input_args: An iterable of DependencyArg instances
1624 @type input_args: Iterable
1625 @param add_to_digraph: If True then add SetArg instances
1626 to the digraph, in order to record parent -> child
1627 relationships from nested sets
1628 @type add_to_digraph: Boolean
1630 @return: All args given in the input together with additional
1631 SetArg instances that are generated from nested sets
1634 traversed_set_args = set()
1636 for arg in input_args:
1637 if not isinstance(arg, SetArg):
1641 root_config = arg.root_config
1642 depgraph_sets = self._dynamic_config.sets[root_config.root]
1645 arg = arg_stack.pop()
1646 if arg in traversed_set_args:
1648 traversed_set_args.add(arg)
1651 self._dynamic_config.digraph.add(arg, None,
1652 priority=BlockerDepPriority.instance)
1656 # Traverse nested sets and add them to the stack
1657 # if they're not already in the graph. Also, graph
1658 # edges between parent and nested sets.
1659 for token in arg.pset.getNonAtoms():
1660 if not token.startswith(SETPREFIX):
1662 s = token[len(SETPREFIX):]
# Prefer a set already registered for this depgraph over the raw
# root_config set of the same name.
1663 nested_set = depgraph_sets.sets.get(s)
1664 if nested_set is None:
1665 nested_set = root_config.sets.get(s)
1666 if nested_set is not None:
1667 nested_arg = SetArg(arg=token, pset=nested_set,
1668 root_config=root_config)
1669 arg_stack.append(nested_arg)
1671 self._dynamic_config.digraph.add(nested_arg, arg,
1672 priority=BlockerDepPriority.instance)
1673 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
1675 def _add_dep(self, dep, allow_unsatisfied=False):
# Resolve a single Dependency: blockers are registered, normal deps get
# a package selected and added to the graph. Fragmentary capture:
# return statements and several branch lines are missing from this dump.
1676 debug = "--debug" in self._frozen_config.myopts
1677 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
1678 nodeps = "--nodeps" in self._frozen_config.myopts
1680 if not buildpkgonly and \
1682 not dep.collapsed_priority.ignored and \
1683 not dep.collapsed_priority.optional and \
1684 dep.parent not in self._dynamic_config._slot_collision_nodes:
1685 if dep.parent.onlydeps:
1686 # It's safe to ignore blockers if the
1687 # parent is an --onlydeps node.
1689 # The blocker applies to the root where
1690 # the parent is or will be installed.
1691 blocker = Blocker(atom=dep.atom,
1692 eapi=dep.parent.eapi,
1693 priority=dep.priority, root=dep.parent.root)
1694 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
1697 if dep.child is None:
1698 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
1699 onlydeps=dep.onlydeps)
1701 # The caller has selected a specific package
1702 # via self._minimize_packages().
1704 existing_node = self._dynamic_config._slot_pkg_map[
1705 dep.root].get(dep_pkg.slot_atom)
# No package could be selected: record the failure, possibly schedule
# backtracking.
1708 if (dep.collapsed_priority.optional or
1709 dep.collapsed_priority.ignored):
1710 # This is an unnecessary build-time dep.
1712 if allow_unsatisfied:
1713 self._dynamic_config._unsatisfied_deps.append(dep)
1715 self._dynamic_config._unsatisfied_deps_for_display.append(
1716 ((dep.root, dep.atom), {"myparent":dep.parent}))
1718 # The parent node should not already be in
1719 # runtime_pkg_mask, since that would trigger an
1720 # infinite backtracking loop.
1721 if self._dynamic_config._allow_backtracking:
1722 if dep.parent in self._dynamic_config._runtime_pkg_mask:
1725 "!!! backtracking loop detected: %s %s\n" % \
1727 self._dynamic_config._runtime_pkg_mask[
1728 dep.parent]), noiselevel=-1)
1729 elif dep.atom.slot_operator_built and \
1730 self._slot_operator_unsatisfied_probe(dep):
1731 self._slot_operator_unsatisfied_backtrack(dep)
1734 # Do not backtrack if only USE have to be changed in
1735 # order to satisfy the dependency. Note that when
1736 # want_restart_for_use_change sets the need_restart
1737 # flag, it causes _select_pkg_highest_available to
1738 # return None, and eventually we come through here
1739 # and skip the "missing dependency" backtracking path.
1740 dep_pkg, existing_node = \
1741 self._select_package(dep.root, dep.atom.without_use,
1742 onlydeps=dep.onlydeps)
1744 self._dynamic_config._backtrack_infos["missing dependency"] = dep
1745 self._dynamic_config._need_restart = True
1750 msg.append("backtracking due to unsatisfied dep:")
1751 msg.append(" parent: %s" % dep.parent)
1752 msg.append(" priority: %s" % dep.priority)
1753 msg.append(" root: %s" % dep.root)
1754 msg.append(" atom: %s" % dep.atom)
1756 writemsg_level("".join("%s\n" % l for l in msg),
1757 noiselevel=-1, level=logging.DEBUG)
1761 self._rebuild.add(dep_pkg, dep)
# Ignored deps are only traversed when explicitly requested.
1763 ignore = dep.collapsed_priority.ignored and \
1764 not self._dynamic_config._traverse_ignored_deps
1765 if not ignore and not self._add_pkg(dep_pkg, dep):
1769 def _check_slot_conflict(self, pkg, atom):
# Return (existing_node, matches): the package already occupying pkg's
# slot (if any) and whether it satisfies the same requirement.
# Fragmentary capture: the `if existing_node:` guard and part of the
# condition are missing from this dump.
1770 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
1773 matches = pkg.cpv == existing_node.cpv
1774 if pkg != existing_node and \
1776 # Use package set for matching since it will match via
1777 # PROVIDE when necessary, while match_from_list does not.
1778 matches = bool(InternalPackageSet(initial_atoms=(atom,),
1779 allow_repo=True).findAtomForPackage(existing_node,
1780 modified_use=self._pkg_use_enabled(existing_node)))
1782 return (existing_node, matches)
1784 def _add_pkg(self, pkg, dep):
# Core graph insertion: handle REQUIRED_USE, slot collisions, digraph
# edges, parent-atom bookkeeping and recursion depth. Fragmentary
# capture: docstring delimiters, many return/continue/else lines and
# several assignments are missing from this dump.
1786 Adds a package to the depgraph, queues dependencies, and handles
1789 debug = "--debug" in self._frozen_config.myopts
1796 myparent = dep.parent
1797 priority = dep.priority
1799 if priority is None:
1800 priority = DepPriority()
# Debug tracing of the child and its parent dependency.
1804 "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
1805 pkg_use_display(pkg, self._frozen_config.myopts,
1806 modified_use=self._pkg_use_enabled(pkg))),
1807 level=logging.DEBUG, noiselevel=-1)
1808 if isinstance(myparent,
1809 (PackageArg, AtomArg)):
1810 # For PackageArg and AtomArg types, it's
1811 # redundant to display the atom attribute.
1813 "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
1814 level=logging.DEBUG, noiselevel=-1)
1816 # Display the specific atom from SetArg or
1819 if dep.atom is not dep.atom.unevaluated_atom:
1820 uneval = " (%s)" % (dep.atom.unevaluated_atom,)
1822 "%s%s%s required by %s\n" %
1823 ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
1824 level=logging.DEBUG, noiselevel=-1)
1826 # Ensure that the dependencies of the same package
1827 # are never processed more than once.
1828 previously_added = pkg in self._dynamic_config.digraph
1830 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
1835 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1836 except portage.exception.InvalidDependString as e:
1837 if not pkg.installed:
1838 # should have been masked before it was selected
1842 # NOTE: REQUIRED_USE checks are delayed until after
1843 # package selection, since we want to prompt the user
1844 # for USE adjustment rather than have REQUIRED_USE
1845 # affect package selection and || dep choices.
1846 if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
1847 eapi_has_required_use(pkg.eapi):
1848 required_use_is_sat = check_required_use(
1849 pkg._metadata["REQUIRED_USE"],
1850 self._pkg_use_enabled(pkg),
1851 pkg.iuse.is_valid_flag,
1853 if not required_use_is_sat:
# Record parent relationships even on failure, so the violation can be
# reported with context.
1854 if dep.atom is not None and dep.parent is not None:
1855 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1858 for parent_atom in arg_atoms:
1859 parent, atom = parent_atom
1860 self._add_parent_atom(pkg, parent_atom)
1864 atom = Atom("=" + pkg.cpv)
1865 self._dynamic_config._unsatisfied_deps_for_display.append(
1867 {"myparent" : dep.parent, "show_req_use" : pkg}))
1868 self._dynamic_config._skip_restart = True
1871 if not pkg.onlydeps:
1873 existing_node, existing_node_matches = \
1874 self._check_slot_conflict(pkg, dep.atom)
1875 slot_collision = False
1877 if existing_node_matches:
1878 # The existing node can be reused.
1879 if pkg != existing_node:
1881 previously_added = True
1883 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1884 except InvalidDependString as e:
1885 if not pkg.installed:
1886 # should have been masked before
1892 "%s%s %s\n" % ("Re-used Child:".ljust(15),
1893 pkg, pkg_use_display(pkg,
1894 self._frozen_config.myopts,
1895 modified_use=self._pkg_use_enabled(pkg))),
1896 level=logging.DEBUG, noiselevel=-1)
# Genuine slot conflict: record it and keep going (see comment below).
1899 self._add_slot_conflict(pkg)
1902 "%s%s %s\n" % ("Slot Conflict:".ljust(15),
1903 existing_node, pkg_use_display(existing_node,
1904 self._frozen_config.myopts,
1905 modified_use=self._pkg_use_enabled(existing_node))),
1906 level=logging.DEBUG, noiselevel=-1)
1908 slot_collision = True
1911 # Now add this node to the graph so that self.display()
1912 # can show use flags and --tree portage.output. This node is
1913 # only being partially added to the graph. It must not be
1914 # allowed to interfere with the other nodes that have been
1915 # added. Do not overwrite data for existing nodes in
1916 # self._dynamic_config.mydbapi since that data will be used for blocker
1918 # Even though the graph is now invalid, continue to process
1919 # dependencies so that things like --fetchonly can still
1920 # function despite collisions.
1922 elif not previously_added:
1923 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1924 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1925 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1926 self._dynamic_config._highest_pkg_cache.clear()
1927 self._check_masks(pkg)
1929 if not pkg.installed:
1930 # Allow this package to satisfy old-style virtuals in case it
1931 # doesn't already. Any pre-existing providers will be preferred
1934 pkgsettings.setinst(pkg.cpv, pkg._metadata)
1935 # For consistency, also update the global virtuals.
1936 settings = self._frozen_config.roots[pkg.root].settings
1938 settings.setinst(pkg.cpv, pkg._metadata)
1940 except portage.exception.InvalidDependString:
1941 if not pkg.installed:
1942 # should have been masked before it was selected
1946 self._dynamic_config._set_nodes.add(pkg)
1948 # Do this even for onlydeps, so that the
1949 # parent/child relationship is always known in case
1950 # self._show_slot_collision_notice() needs to be called later.
1951 # If a direct circular dependency is not an unsatisfied
1952 # buildtime dependency then drop it here since otherwise
1953 # it can skew the merge order calculation in an unwanted
1955 if pkg != dep.parent or \
1956 (priority.buildtime and not priority.satisfied):
1957 self._dynamic_config.digraph.add(pkg,
1958 dep.parent, priority=priority)
1959 if dep.atom is not None and dep.parent is not None:
1960 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1963 for parent_atom in arg_atoms:
1964 parent, atom = parent_atom
1965 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1966 self._add_parent_atom(pkg, parent_atom)
1968 # This section determines whether we go deeper into dependencies or not.
1969 # We want to go deeper on a few occasions:
1970 # Installing package A, we need to make sure package A's deps are met.
1971 # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1972 # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1973 if arg_atoms and depth > 0:
1974 for parent, atom in arg_atoms:
1975 if parent.reset_depth:
1979 if previously_added and pkg.depth is not None:
1980 depth = min(pkg.depth, depth)
1982 deep = self._dynamic_config.myparams.get("deep", 0)
1983 update = "--update" in self._frozen_config.myopts
1985 dep.want_update = (not self._dynamic_config._complete_mode and
1986 (arg_atoms or update) and
1987 not (deep is not True and depth > deep))
# Track := deps so _slot_operator_trigger_reinstalls can revisit them.
1990 if (not pkg.onlydeps and
1991 dep.atom and dep.atom.slot_operator is not None):
1992 self._add_slot_operator_dep(dep)
1994 recurse = deep is True or depth + 1 <= deep
1995 dep_stack = self._dynamic_config._dep_stack
1996 if "recurse" not in self._dynamic_config.myparams:
1998 elif pkg.installed and not recurse:
1999 dep_stack = self._dynamic_config._ignored_deps
2001 self._spinner_update()
2003 if not previously_added:
2004 dep_stack.append(pkg)
2007 def _check_masks(self, pkg):
2009 slot_key = (pkg.root, pkg.slot_atom)
2011 # Check for upgrades in the same slot that are
2012 # masked due to a LICENSE change in a newer
2013 # version that is not masked for any other reason.
2014 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
2015 if other_pkg is not None and pkg < other_pkg:
2016 self._dynamic_config._masked_license_updates.add(other_pkg)
2018 def _add_parent_atom(self, pkg, parent_atom):
2019 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
2020 if parent_atoms is None:
2021 parent_atoms = set()
2022 self._dynamic_config._parent_atoms[pkg] = parent_atoms
2023 parent_atoms.add(parent_atom)
2025 def _add_slot_operator_dep(self, dep):
2026 slot_key = (dep.root, dep.child.slot_atom)
2027 slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
2028 if slot_info is None:
2030 self._dynamic_config._slot_operator_deps[slot_key] = slot_info
2031 slot_info.append(dep)
2033 def _add_slot_conflict(self, pkg):
2034 self._dynamic_config._slot_collision_nodes.add(pkg)
2035 slot_key = (pkg.slot_atom, pkg.root)
2036 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
2037 if slot_nodes is None:
2039 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
2040 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		# Collect the per-class dependency strings (DEPEND, RDEPEND,
		# PDEPEND, HDEPEND) for pkg, adjust them according to the active
		# options (--buildpkgonly, --with-bdeps, --root-deps, removal
		# actions), then parse and queue the resulting dependencies.

		metadata = pkg._metadata
		removal_action = "remove" in self._dynamic_config.myparams
		eapi_attrs = _get_eapi_attrs(pkg.eapi)

		# Copy each dependency-class string out of pkg's metadata.
		for k in Package._dep_keys:
			edepend[k] = metadata[k]

		# With --buildpkgonly and no deep traversal, runtime deps of a
		# not-yet-built package will never be merged, so drop them.
		if not pkg.built and \
			"--buildpkgonly" in self._frozen_config.myopts and \
			"deep" not in self._dynamic_config.myparams:
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""

		ignore_build_time_deps = False
		if pkg.built and not removal_action:
			if self._dynamic_config.myparams.get("bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				ignore_build_time_deps = True

		if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
			# Removal actions never traverse ignored buildtime
			# dependencies, so it's safe to discard them early.
			edepend["DEPEND"] = ""
			edepend["HDEPEND"] = ""
			ignore_build_time_deps = True

		ignore_depend_deps = ignore_build_time_deps
		ignore_hdepend_deps = ignore_build_time_deps

		# Decide which root build-time deps apply to; --root-deps can
		# redirect them to the target root.
		depend_root = myroot
		if eapi_attrs.hdepend:
			depend_root = myroot
		depend_root = self._frozen_config._running_root.root
		root_deps = self._frozen_config.myopts.get("--root-deps")
		if root_deps is not None:
			if root_deps is True:
				depend_root = myroot
			elif root_deps == "rdeps":
				ignore_depend_deps = True

		# If rebuild mode is not enabled, it's safe to discard ignored
		# build-time dependencies. If you want these deps to be traversed
		# in "complete" mode then you need to specify --with-bdeps=y.
		if not self._rebuild.rebuild:
			if ignore_depend_deps:
				edepend["DEPEND"] = ""
			if ignore_hdepend_deps:
				edepend["HDEPEND"] = ""

		# (root, dep string, priority) triples for each dep class; note
		# that build-time deps of built packages are marked optional.
			(depend_root, edepend["DEPEND"],
				self._priority(buildtime=True,
				optional=(pkg.built or ignore_depend_deps),
				ignored=ignore_depend_deps)),
			(self._frozen_config._running_root.root, edepend["HDEPEND"],
				self._priority(buildtime=True,
				optional=(pkg.built or ignore_hdepend_deps),
				ignored=ignore_hdepend_deps)),
			(myroot, edepend["RDEPEND"],
				self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"],
				self._priority(runtime_post=True))

		debug = "--debug" in self._frozen_config.myopts

		for dep_root, dep_string, dep_priority in deps:
				writemsg_level("\nParent: %s\n" % (pkg,),
					noiselevel=-1, level=logging.DEBUG)
				writemsg_level("Depstring: %s\n" % (dep_string,),
					noiselevel=-1, level=logging.DEBUG)
				writemsg_level("Priority: %s\n" % (dep_priority,),
					noiselevel=-1, level=logging.DEBUG)

				# Reduce the raw dep string to a structured token list,
				# applying pkg's enabled USE flags.
				dep_string = portage.dep.use_reduce(dep_string,
					uselist=self._pkg_use_enabled(pkg),
					is_valid_flag=pkg.iuse.is_valid_flag,
					opconvert=True, token_class=Atom,
			except portage.exception.InvalidDependString as e:
				if not pkg.installed:
					# should have been masked before it was selected

				# Try again, but omit the is_valid_flag argument, since
				# invalid USE conditionals are a common problem and it's
				# practical to ignore this issue for installed packages.
					dep_string = portage.dep.use_reduce(dep_string,
						uselist=self._pkg_use_enabled(pkg),
						opconvert=True, token_class=Atom,
				except portage.exception.InvalidDependString as e:
					self._dynamic_config._masked_installed.add(pkg)

				# Defer disjunctive (|| and virtual) deps; only the
				# non-disjunctive remainder is processed right away.
				dep_string = list(self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, dep_string))
			except portage.exception.InvalidDependString as e:
				self._dynamic_config._masked_installed.add(pkg)
				# should have been masked before it was selected

			if not self._add_pkg_dep_string(
				pkg, dep_root, dep_priority, dep_string,

		self._dynamic_config._traversed_pkg_deps.add(pkg)
2183 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
2185 _autounmask_backup = self._dynamic_config._autounmask
2186 if dep_priority.optional or dep_priority.ignored:
2187 # Temporarily disable autounmask for deps that
2188 # don't necessarily need to be satisfied.
2189 self._dynamic_config._autounmask = False
2191 return self._wrapped_add_pkg_dep_string(
2192 pkg, dep_root, dep_priority, dep_string,
2195 self._dynamic_config._autounmask = _autounmask_backup
	def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
		dep_string, allow_unsatisfied):
		# Select packages for pkg's (already use_reduce'd) dep_string,
		# create Dependency nodes for them, and queue each via _add_dep.
		# Indirect virtual deps selected by dep_check are traversed here
		# as well, preserving real parent/child relationships.
		depth = pkg.depth + 1
		deep = self._dynamic_config.myparams.get("deep", 0)
		recurse_satisfied = deep is True or depth <= deep
		debug = "--debug" in self._frozen_config.myopts
		strict = pkg.type_name != "installed"

			writemsg_level("\nParent: %s\n" % (pkg,),
				noiselevel=-1, level=logging.DEBUG)
			dep_repr = portage.dep.paren_enclose(dep_string,
				unevaluated_atom=True, opconvert=True)
			writemsg_level("Depstring: %s\n" % (dep_repr,),
				noiselevel=-1, level=logging.DEBUG)
			writemsg_level("Priority: %s\n" % (dep_priority,),
				noiselevel=-1, level=logging.DEBUG)

			# Resolve the dep string into concrete atom selections,
			# keyed by the package whose deps they satisfy.
			selected_atoms = self._select_atoms(dep_root,
				dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
				strict=strict, priority=dep_priority)
		except portage.exception.InvalidDependString:
				self._dynamic_config._masked_installed.add(pkg)
			# should have been masked before it was selected

			writemsg_level("Candidates: %s\n" % \
				([str(x) for x in selected_atoms[pkg]],),
				noiselevel=-1, level=logging.DEBUG)

		root_config = self._frozen_config.roots[dep_root]
		vardb = root_config.trees["vartree"].dbapi
		traversed_virt_pkgs = set()

		reinstall_atoms = self._frozen_config.reinstall_atoms
		for atom, child in self._minimize_children(
			pkg, dep_priority, root_config, selected_atoms[pkg]):

			# If this was a specially generated virtual atom
			# from dep_check, map it back to the original, in
			# order to avoid distortion in places like display
			# or conflict resolution code.
			is_virt = hasattr(atom, '_orig_atom')
			atom = getattr(atom, '_orig_atom', atom)

			if atom.blocker and \
				(dep_priority.optional or dep_priority.ignored):
				# For --with-bdeps, ignore build-time only blockers
				# that originate from built packages.

			mypriority = dep_priority.copy()
			if not atom.blocker:
				# Installed packages matching the atom (excluding ones
				# scheduled for reinstall) may mark it satisfied.
				inst_pkgs = [inst_pkg for inst_pkg in
					reversed(vardb.match_pkgs(atom))
					if not reinstall_atoms.findAtomForPackage(inst_pkg,
					modified_use=self._pkg_use_enabled(inst_pkg))]
					for inst_pkg in inst_pkgs:
						if self._pkg_visibility_check(inst_pkg):
							mypriority.satisfied = inst_pkg
					if not mypriority.satisfied:
						# none visible, so use highest
						mypriority.satisfied = inst_pkgs[0]

			dep = Dependency(atom=atom,
				blocker=atom.blocker, child=child, depth=depth, parent=pkg,
				priority=mypriority, root=dep_root)

			# In some cases, dep_check will return deps that shouldn't
			# be proccessed any further, so they are identified and
			# discarded here. Try to discard as few as possible since
			# discarded dependencies reduce the amount of information
			# available for optimization of merge order.
			if not atom.blocker and \
				not recurse_satisfied and \
				mypriority.satisfied and \
				mypriority.satisfied.visible and \
				dep.child is not None and \
				not dep.child.installed and \
				self._dynamic_config._slot_pkg_map[dep.child.root].get(
				dep.child.slot_atom) is None:
					myarg = next(self._iter_atoms_for_pkg(dep.child), None)
				except InvalidDependString:
					if not dep.child.installed:

					# Existing child selection may not be valid unless
					# it's added to the graph immediately, since "complete"
					# mode may select a different child later.
					self._dynamic_config._ignored_deps.append(dep)

			if dep_priority.ignored and \
				not self._dynamic_config._traverse_ignored_deps:
				if is_virt and dep.child is not None:
					traversed_virt_pkgs.add(dep.child)
				self._dynamic_config._ignored_deps.append(dep)
				if not self._add_dep(dep,
					allow_unsatisfied=allow_unsatisfied):
				if is_virt and dep.child is not None:
					traversed_virt_pkgs.add(dep.child)

		selected_atoms.pop(pkg)

		# Add selected indirect virtual deps to the graph. This
		# takes advantage of circular dependency avoidance that's done
		# by dep_zapdeps. We preserve actual parent/child relationships
		# here in order to avoid distorting the dependency graph like
		# <=portage-2.1.6.x did.
		for virt_dep, atoms in selected_atoms.items():

			virt_pkg = virt_dep.child
			if virt_pkg not in traversed_virt_pkgs:

				writemsg_level("\nCandidates: %s: %s\n" % \
					(virt_pkg.cpv, [str(x) for x in atoms]),
					noiselevel=-1, level=logging.DEBUG)

			if not dep_priority.ignored or \
				self._dynamic_config._traverse_ignored_deps:

				inst_pkgs = [inst_pkg for inst_pkg in
					reversed(vardb.match_pkgs(virt_dep.atom))
					if not reinstall_atoms.findAtomForPackage(inst_pkg,
					modified_use=self._pkg_use_enabled(inst_pkg))]
					for inst_pkg in inst_pkgs:
						if self._pkg_visibility_check(inst_pkg):
							virt_dep.priority.satisfied = inst_pkg
					if not virt_dep.priority.satisfied:
						# none visible, so use highest
						virt_dep.priority.satisfied = inst_pkgs[0]

				if not self._add_pkg(virt_pkg, virt_dep):

			for atom, child in self._minimize_children(
				pkg, self._priority(runtime=True), root_config, atoms):

				# If this was a specially generated virtual atom
				# from dep_check, map it back to the original, in
				# order to avoid distortion in places like display
				# or conflict resolution code.
				is_virt = hasattr(atom, '_orig_atom')
				atom = getattr(atom, '_orig_atom', atom)

				# This is a GLEP 37 virtual, so its deps are all runtime.
				mypriority = self._priority(runtime=True)
				if not atom.blocker:
					inst_pkgs = [inst_pkg for inst_pkg in
						reversed(vardb.match_pkgs(atom))
						if not reinstall_atoms.findAtomForPackage(inst_pkg,
						modified_use=self._pkg_use_enabled(inst_pkg))]
						for inst_pkg in inst_pkgs:
							if self._pkg_visibility_check(inst_pkg):
								mypriority.satisfied = inst_pkg
						if not mypriority.satisfied:
							# none visible, so use highest
							mypriority.satisfied = inst_pkgs[0]

				# Dependencies of virtuals are considered to have the
				# same depth as the virtual itself.
				dep = Dependency(atom=atom,
					blocker=atom.blocker, child=child, depth=virt_dep.depth,
					parent=virt_pkg, priority=mypriority, root=dep_root,
					collapsed_parent=pkg, collapsed_priority=dep_priority)

				if not atom.blocker and \
					not recurse_satisfied and \
					mypriority.satisfied and \
					mypriority.satisfied.visible and \
					dep.child is not None and \
					not dep.child.installed and \
					self._dynamic_config._slot_pkg_map[dep.child.root].get(
					dep.child.slot_atom) is None:
						myarg = next(self._iter_atoms_for_pkg(dep.child), None)
					except InvalidDependString:
						if not dep.child.installed:

						self._dynamic_config._ignored_deps.append(dep)

				if dep_priority.ignored and \
					not self._dynamic_config._traverse_ignored_deps:
					if is_virt and dep.child is not None:
						traversed_virt_pkgs.add(dep.child)
					self._dynamic_config._ignored_deps.append(dep)
					if not self._add_dep(dep,
						allow_unsatisfied=allow_unsatisfied):
					if is_virt and dep.child is not None:
						traversed_virt_pkgs.add(dep.child)

			writemsg_level("\nExiting... %s\n" % (pkg,),
				noiselevel=-1, level=logging.DEBUG)
	def _minimize_children(self, parent, priority, root_config, atoms):
		"""
		Selects packages to satisfy the given atoms, and minimizes the
		number of selected packages. This serves to identify and eliminate
		redundant package selections when multiple atoms happen to specify
		overlapping sets of packages.  Yields (atom, package) pairs.
		"""
		# Map each atom to the package selected for it.
			dep_pkg, existing_node = self._select_package(
				root_config.root, atom)
			atom_pkg_map[atom] = dep_pkg

		# With fewer than two selections there is nothing to minimize.
		if len(atom_pkg_map) < 2:
			for item in atom_pkg_map.items():

		# Group atoms by selected package and packages by cp, so that
		# redundant selections within the same cp can be detected.
		for atom, pkg in atom_pkg_map.items():
			pkg_atom_map.setdefault(pkg, set()).add(atom)
			cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

		for pkgs in cp_pkg_map.values():
				for atom in pkg_atom_map[pkg]:

			# Use a digraph to identify and eliminate any
			# redundant package selections.
			atom_pkg_graph = digraph()
				for atom in pkg_atom_map[pkg1]:
					atom_pkg_graph.add(pkg1, atom)
					atom_set = InternalPackageSet(initial_atoms=(atom,),
						# A second package satisfying the same atom makes
						# one of the two potentially redundant.
						if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
							atom_pkg_graph.add(pkg2, atom)

				# A package can be eliminated only if every atom that
				# selected it is also satisfied by another package.
				eliminate_pkg = True
				for atom in atom_pkg_graph.parent_nodes(pkg):
					if len(atom_pkg_graph.child_nodes(atom)) < 2:
						eliminate_pkg = False
					atom_pkg_graph.remove(pkg)

			# Yield ~, =*, < and <= atoms first, since those are more likely to
			# cause slot conflicts, and we want those atoms to be displayed
			# in the resulting slot conflict message (see bug #291142).
			# Give similar treatment to slot/sub-slot atoms.
			for atom in cp_atoms:
				if atom.slot_operator_built:
					abi_atoms.append(atom)
				for child_pkg in atom_pkg_graph.child_nodes(atom):
					existing_node, matches = \
						self._check_slot_conflict(child_pkg, atom)
					if existing_node and not matches:
					conflict_atoms.append(atom)
					normal_atoms.append(atom)

			for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
				child_pkgs = atom_pkg_graph.child_nodes(atom)
				# if more than one child, yield highest version
				if len(child_pkgs) > 1:
					yield (atom, child_pkgs[-1])
2523 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2525 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
2526 Yields non-disjunctive deps. Raises InvalidDependString when
2529 for x in dep_struct:
2530 if isinstance(x, list):
2531 if x and x[0] == "||":
2532 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2534 for y in self._queue_disjunctive_deps(
2535 pkg, dep_root, dep_priority, x):
2538 # Note: Eventually this will check for PROPERTIES=virtual
2539 # or whatever other metadata gets implemented for this
2541 if x.cp.startswith('virtual/'):
2542 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2546 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2547 self._dynamic_config._dep_disjunctive_stack.append(
2548 (pkg, dep_root, dep_priority, dep_struct))
	def _pop_disjunction(self, allow_unsatisfied):
		"""
		Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
		populate self._dynamic_config._dep_stack.
		"""
		pkg, dep_root, dep_priority, dep_struct = \
			self._dynamic_config._dep_disjunctive_stack.pop()
		# Feed the deferred disjunction back through the regular
		# dep-string handler; a failure there propagates to the caller.
		if not self._add_pkg_dep_string(
			pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
2562 def _priority(self, **kwargs):
2563 if "remove" in self._dynamic_config.myparams:
2564 priority_constructor = UnmergeDepPriority
2566 priority_constructor = DepPriority
2567 return priority_constructor(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		Expand a category-less atom into candidate atoms with categories.

		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@return: a list of atoms containing categories (possibly empty)
		"""
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		cat, atom_pn = portage.catsplit(null_cp)

		dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
		# Probe every category of every configured db for a package
		# whose name matches.
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):
		# Build a category-qualified atom for each matching category.
		for cat in categories:
			deps.append(Atom(insert_category_into_atom(
				atom_without_category, cat), allow_repo=True))
2595 def _have_new_virt(self, root, atom_cp):
2597 for db, pkg_type, built, installed, db_keys in \
2598 self._dynamic_config._filtered_trees[root]["dbs"]:
2599 if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		# Yield the command-line/set argument atoms that match pkg,
		# skipping atoms whose cp is now served by a new-style virtual
		# and atoms that a package in a higher slot satisfies better.
		depgraph_sets = self._dynamic_config.sets[pkg.root]
		atom_arg_map = depgraph_sets.atom_arg_map
		for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
			if atom.cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom.cp):
				self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom.cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					# NOTE(review): apparently records a visible match in
					# a different (higher) slot — confirm against the
					# elided surrounding logic.
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
2633 def select_files(self, args):
2634 # Use the global event loop for spinner progress
2635 # indication during file owner lookups (bug #461412).
2638 spinner = self._frozen_config.spinner
2639 if spinner is not None and \
2640 spinner.update is not spinner.update_quiet:
2641 spinner_id = self._event_loop.idle_add(
2642 self._frozen_config.spinner.update)
2643 return self._select_files(args)
2645 if spinner_id is not None:
2646 self._event_loop.source_remove(spinner_id)
	def _select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
		self._dynamic_config._initial_arg_list and call self._resolve to create the
		appropriate depgraph and return a favorite list."""
		debug = "--debug" in self._frozen_config.myopts
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		sets = root_config.sets
		depgraph_sets = self._dynamic_config.sets[root_config.root]
		eroot = root_config.root
		root = root_config.settings['ROOT']
		vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
		real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
		portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
		bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
		pkgsettings = self._frozen_config.pkgsettings[eroot]
		onlydeps = "--onlydeps" in self._frozen_config.myopts
			# Each argument is classified by extension/shape: binary
			# package, ebuild path, file path (owner lookup), set, or
			# plain atom.
			ext = os.path.splitext(x)[1]
				if not os.path.exists(x):
						# Fall back to PKGDIR locations for a bare
						# tbz2 file name.
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
						writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
						writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
						return 0, myfavorites
				mytbz2=portage.xpak.tbz2(x)
				cat = mytbz2.getfile("CATEGORY")
					cat = _unicode_decode(cat.strip(),
						encoding=_encodings['repo.content'])
					mykey = cat + "/" + os.path.basename(x)[:-5]
					writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
					self._dynamic_config._skip_restart = True
					return 0, myfavorites
				elif os.path.realpath(x) != \
					os.path.realpath(bindb.bintree.getname(mykey)):
					writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
					self._dynamic_config._skip_restart = True
					return 0, myfavorites

				pkg = self._pkg(mykey, "binary", root_config,
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif ext==".ebuild":
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
					"hierarchy or does not exist\n") % x
				if not portage.isvalidatom(cp):
					writemsg(error_msg, noiselevel=-1)
					return 0, myfavorites
				cat = portage.catsplit(cp)[0]
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
					writemsg(error_msg, noiselevel=-1)
					return 0, myfavorites
				ebuild_path = portdb.findname(mykey)
					# The ebuild must come from a configured repository.
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
						self._dynamic_config._skip_restart = True
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.cpv_getkey(mykey)):
						writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
						writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
						writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
						countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
					writemsg(error_msg, noiselevel=-1)
					return 0, myfavorites
				pkg = self._pkg(mykey, "ebuild", root_config,
					onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
					os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif x.startswith(os.path.sep):
				if not x.startswith(eroot):
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
						" $EROOT.\n") % x, noiselevel=-1)
					self._dynamic_config._skip_restart = True
				# Queue these up since it's most efficient to handle
				# multiple files in a single iter_owners() call.
				lookup_owners.append(x)
			elif x.startswith("." + os.sep) or \
				x.startswith(".." + os.sep):
				f = os.path.abspath(x)
				if not f.startswith(eroot):
					portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
						" $EROOT.\n") % (f, x), noiselevel=-1)
					self._dynamic_config._skip_restart = True
				lookup_owners.append(f)
				if x in ("system", "world"):
				if x.startswith(SETPREFIX):
					s = x[len(SETPREFIX):]
						raise portage.exception.PackageSetNotFound(s)
					if s in depgraph_sets.sets:
						set_atoms = root_config.setconfig.getSetAtoms(s)
					except portage.exception.PackageSetNotFound as e:
						writemsg_level("\n\n", level=logging.ERROR,
						for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
							for error_msg in pset.errors:
								writemsg_level("%s\n" % (error_msg,),
									level=logging.ERROR, noiselevel=-1)

						writemsg_level(("emerge: the given set '%s' "
							"contains a non-existent set named '%s'.\n") % \
							(s, e), level=logging.ERROR, noiselevel=-1)
						if s in ('world', 'selected') and \
							SETPREFIX + e.value in sets['selected']:
							writemsg_level(("Use `emerge --deselect %s%s` to "
								"remove this set from world_sets.\n") %
								(SETPREFIX, e,), level=logging.ERROR,
						writemsg_level("\n", level=logging.ERROR,
						return False, myfavorites

					depgraph_sets.sets[s] = pset
					args.append(SetArg(arg=x, pset=pset,
						root_config=root_config))
				if not is_valid_package_atom(x, allow_repo=True):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
					self._dynamic_config._skip_restart = True

				# Don't expand categories or old-style virtuals here unless
				# necessary. Expansion of old-style virtuals here causes at
				# least the following problems:
				#   1) It's more difficult to determine which set(s) an atom
				#      came from, if any.
				#   2) It takes away freedom from the resolver to choose other
				#      possible expansions when necessary.
				if "/" in x.split(":")[0]:
					args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
						root_config=root_config))

				expanded_atoms = self._dep_expand(root_config, x)
				installed_cp_set = set()
				for atom in expanded_atoms:
					if vardb.cp_list(atom.cp):
						installed_cp_set.add(atom.cp)

				if len(installed_cp_set) > 1:
					non_virtual_cps = set()
					for atom_cp in installed_cp_set:
						if not atom_cp.startswith("virtual/"):
							non_virtual_cps.add(atom_cp)
					if len(non_virtual_cps) == 1:
						installed_cp_set = non_virtual_cps

				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
					installed_cp = next(iter(installed_cp_set))
					for atom in expanded_atoms:
						if atom.cp == installed_cp:
							for pkg in self._iter_match_pkgs_any(
								root_config, atom.without_use,
								if not pkg.installed:
							expanded_atoms = [atom]

				# If a non-virtual package and one or more virtual packages
				# are in expanded_atoms, use the non-virtual package.
				if len(expanded_atoms) > 1:
					number_of_virtuals = 0
					for expanded_atom in expanded_atoms:
						if expanded_atom.cp.startswith("virtual/"):
							number_of_virtuals += 1
							candidate = expanded_atom
					if len(expanded_atoms) - number_of_virtuals == 1:
						expanded_atoms = [ candidate ]

				if len(expanded_atoms) > 1:
					writemsg("\n\n", noiselevel=-1)
					ambiguous_package_name(x, expanded_atoms, root_config,
						self._frozen_config.spinner, self._frozen_config.myopts)
					self._dynamic_config._skip_restart = True
					return False, myfavorites
					atom = expanded_atoms[0]
					null_atom = Atom(insert_category_into_atom(x, "null"),
					cat, atom_pn = portage.catsplit(null_atom.cp)
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
						# Allow the depgraph to choose which virtual.
						atom = Atom(null_atom.replace('null/', 'virtual/', 1),

				if atom.use and atom.use.conditional:
						("\n\n!!! '%s' contains a conditional " + \
						"which is not allowed.\n") % (x,), noiselevel=-1)
					writemsg("!!! Please check ebuild(5) for full details.\n")
					self._dynamic_config._skip_restart = True

				args.append(AtomArg(arg=x, atom=atom,
					root_config=root_config))

			# Resolve queued file paths to owning packages in a single
			# iter_owners() pass.
			search_for_multiple = False
			if len(lookup_owners) > 1:
				search_for_multiple = True

			for x in lookup_owners:
				if not search_for_multiple and os.path.isdir(x):
					search_for_multiple = True
				relative_paths.append(x[len(root)-1:])

			for pkg, relative_path in \
				real_vardb._owners.iter_owners(relative_paths):
				owners.add(pkg.mycpv)
				if not search_for_multiple:

				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
					"by any package.\n") % lookup_owners[0], noiselevel=-1)
				self._dynamic_config._skip_restart = True

				pkg = vardb._pkg_str(cpv, None)
				atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
				args.append(AtomArg(arg=atom, atom=atom,
					root_config=root_config))

		if "--update" in self._frozen_config.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#   1) Initialize args
			#   2) Select packages and generate initial greedy atoms
			#   3) Update args with greedy atoms
			#   4) Select packages and generate greedy atoms again, while
			#      accounting for any blockers between selected packages
			#   5) Update args with revised greedy atoms

			self._set_args(args)
				greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom):
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))

			self._set_args(greedy_args)

			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
				revised_greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom,
					blocker_lookahead=True):
					revised_greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		args.extend(self._gen_reinstall_sets())
		self._set_args(args)

		# Collect the favorites (world-file candidates) from the args.
		myfavorites = set(myfavorites)
			if isinstance(arg, (AtomArg, PackageArg)):
				myfavorites.add(arg.atom)
			elif isinstance(arg, SetArg):
				if not arg.internal:
					myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

			portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
		self._dynamic_config._initial_arg_list = args[:]

		return self._resolve(myfavorites)
	def _gen_reinstall_sets(self):
		# Yield internal SetArg instances wrapping the atoms scheduled
		# for rebuild, reinstall, and slot-operator replacement, grouped
		# into one set per (root, set name).
		for root, atom in self._rebuild.rebuild_list:
			atom_list.append((root, '__auto_rebuild__', atom))
		for root, atom in self._rebuild.reinstall_list:
			atom_list.append((root, '__auto_reinstall__', atom))
		for root, atom in self._dynamic_config._slot_operator_replace_installed:
			atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))

		# Group the collected atoms per (root, set_name).
		for root, set_name, atom in atom_list:
			set_dict.setdefault((root, set_name), []).append(atom)

		for (root, set_name), atoms in set_dict.items():
			yield SetArg(arg=(SETPREFIX + set_name),
				# Set reset_depth=False here, since we don't want these
				# special sets to interact with depth calculations (see
				# the emerge --deep=DEPTH option), though we want them
				# to behave like normal arguments in most other respects.
				pset=InternalPackageSet(initial_atoms=atoms),
				force_reinstall=True,
				root_config=self._frozen_config.roots[root])
3007 def _resolve(self, myfavorites):
3008 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
3009 call self._create_graph to process their deps and return
# NOTE(review): the docstring's closing lines (original 3010) and several
# statements (e.g. the `try:` opening the per-atom block, `continue`
# statements, `else:` branches) are elided from this listing — the
# embedded numbering skips them. The code below is kept byte-identical.
3011 debug = "--debug" in self._frozen_config.myopts
3012 onlydeps = "--onlydeps" in self._frozen_config.myopts
3013 myroot = self._frozen_config.target_root
3014 pkgsettings = self._frozen_config.pkgsettings[myroot]
3015 pprovideddict = pkgsettings.pprovideddict
3016 virtuals = pkgsettings.getvirtuals()
3017 args = self._dynamic_config._initial_arg_list[:]
# Phase 1: pull every argument atom into the graph as a root node.
3019 for arg in self._expand_set_args(args, add_to_digraph=True):
3020 for atom in arg.pset.getAtoms():
3021 self._spinner_update()
3022 dep = Dependency(atom=atom, onlydeps=onlydeps,
3023 root=myroot, parent=arg)
3025 pprovided = pprovideddict.get(atom.cp)
3026 if pprovided and portage.match_from_list(atom, pprovided):
3027 # A provided package has been specified on the command line.
3028 self._dynamic_config._pprovided_args.append((arg, atom))
3030 if isinstance(arg, PackageArg):
3031 if not self._add_pkg(arg.package, dep) or \
3032 not self._create_graph():
3033 if not self.need_restart():
3034 sys.stderr.write(("\n\n!!! Problem " + \
3035 "resolving dependencies for %s\n") % \
3037 return 0, myfavorites
3040 writemsg_level("\n Arg: %s\n Atom: %s\n" %
3041 (arg, atom), noiselevel=-1, level=logging.DEBUG)
3042 pkg, existing_node = self._select_package(
3043 myroot, atom, onlydeps=onlydeps)
# No package selected: check whether an old-style virtual expansion
# is satisfied by package.provided before reporting failure.
3045 pprovided_match = False
3046 for virt_choice in virtuals.get(atom.cp, []):
3047 expanded_atom = portage.dep.Atom(
3048 atom.replace(atom.cp, virt_choice.cp, 1))
3049 pprovided = pprovideddict.get(expanded_atom.cp)
3051 portage.match_from_list(expanded_atom, pprovided):
3052 # A provided package has been
3053 # specified on the command line.
3054 self._dynamic_config._pprovided_args.append((arg, atom))
3055 pprovided_match = True
3061 for any_match in self._iter_match_pkgs_any(
3062 self._frozen_config.roots[myroot], atom):
3063 if self._frozen_config.excluded_pkgs.findAtomForPackage(
3064 any_match, modified_use=self._pkg_use_enabled(any_match)):
# Unsatisfied argument atoms are fatal unless they come from the
# selected/system/world sets, which tolerate missing members.
3070 if not (isinstance(arg, SetArg) and \
3071 arg.name in ("selected", "system", "world")):
3072 self._dynamic_config._unsatisfied_deps_for_display.append(
3073 ((myroot, atom), {"myparent" : arg}))
3074 return 0, myfavorites
3076 self._dynamic_config._missing_args.append((arg, atom))
3078 if atom.cp != pkg.cp:
3079 # For old-style virtuals, we need to repeat the
3080 # package.provided check against the selected package.
3081 expanded_atom = atom.replace(atom.cp, pkg.cp)
3082 pprovided = pprovideddict.get(pkg.cp)
3084 portage.match_from_list(expanded_atom, pprovided):
3085 # A provided package has been
3086 # specified on the command line.
3087 self._dynamic_config._pprovided_args.append((arg, atom))
3089 if pkg.installed and \
3090 "selective" not in self._dynamic_config.myparams and \
3091 not self._frozen_config.excluded_pkgs.findAtomForPackage(
3092 pkg, modified_use=self._pkg_use_enabled(pkg)):
3093 self._dynamic_config._unsatisfied_deps_for_display.append(
3094 ((myroot, atom), {"myparent" : arg}))
3095 # Previous behavior was to bail out in this case, but
3096 # since the dep is satisfied by the installed package,
3097 # it's more friendly to continue building the graph
3098 # and just show a warning message. Therefore, only bail
3099 # out here if the atom is not from either the system or
3101 if not (isinstance(arg, SetArg) and \
3102 arg.name in ("selected", "system", "world")):
3103 return 0, myfavorites
3105 # Add the selected package to the graph as soon as possible
3106 # so that later dep_check() calls can use it as feedback
3107 # for making more consistent atom selections.
3108 if not self._add_pkg(pkg, dep):
3109 if self.need_restart():
3111 elif isinstance(arg, SetArg):
3112 writemsg(("\n\n!!! Problem resolving " + \
3113 "dependencies for %s from %s\n") % \
3114 (atom, arg.arg), noiselevel=-1)
3116 writemsg(("\n\n!!! Problem resolving " + \
3117 "dependencies for %s\n") % \
3118 (atom,), noiselevel=-1)
3119 return 0, myfavorites
3121 except SystemExit as e:
3122 raise # Needed else can't exit
3123 except Exception as e:
3124 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
3125 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
3128 # Now that the root packages have been added to the graph,
3129 # process the dependencies.
3130 if not self._create_graph():
3131 return 0, myfavorites
3135 except self._unknown_internal_error:
3136 return False, myfavorites
# Phase 2: post-graph checks. Each failure below returns False so the
# backtracker (not the caller's error path) decides what to do next.
3138 if (self._dynamic_config._slot_collision_info and
3139 not self._accept_blocker_conflicts()) or \
3140 (self._dynamic_config._allow_backtracking and
3141 "slot conflict" in self._dynamic_config._backtrack_infos):
3142 return False, myfavorites
3144 if self._rebuild.trigger_rebuilds():
3145 backtrack_infos = self._dynamic_config._backtrack_infos
3146 config = backtrack_infos.setdefault("config", {})
3147 config["rebuild_list"] = self._rebuild.rebuild_list
3148 config["reinstall_list"] = self._rebuild.reinstall_list
3149 self._dynamic_config._need_restart = True
3150 return False, myfavorites
3152 if "config" in self._dynamic_config._backtrack_infos and \
3153 ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
3154 "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
3155 self.need_restart():
3156 return False, myfavorites
3158 if not self._dynamic_config._prune_rebuilds and \
3159 self._dynamic_config._slot_operator_replace_installed and \
3160 self._get_missed_updates():
3161 # When there are missed updates, we might have triggered
3162 # some unnecessary rebuilds (see bug #439688). So, prune
3163 # all the rebuilds and backtrack with the problematic
3164 # updates masked. The next backtrack run should pull in
3165 # any rebuilds that are really needed, and this
3166 # prune_rebuilds path should never be entered more than
3167 # once in a series of backtracking nodes (in order to
3168 # avoid a backtracking loop).
3169 backtrack_infos = self._dynamic_config._backtrack_infos
3170 config = backtrack_infos.setdefault("config", {})
3171 config["prune_rebuilds"] = True
3172 self._dynamic_config._need_restart = True
3173 return False, myfavorites
3175 if self.need_restart():
3176 # want_restart_for_use_change triggers this
3177 return False, myfavorites
# --buildpkgonly: verify the merge-ordering subgraph has no hard
# build-time cycles/deps left after dropping nomerge nodes.
3179 if "--fetchonly" not in self._frozen_config.myopts and \
3180 "--buildpkgonly" in self._frozen_config.myopts:
3181 graph_copy = self._dynamic_config.digraph.copy()
3182 removed_nodes = set()
3183 for node in graph_copy:
3184 if not isinstance(node, Package) or \
3185 node.operation == "nomerge":
3186 removed_nodes.add(node)
3187 graph_copy.difference_update(removed_nodes)
3188 if not graph_copy.hasallzeros(ignore_priority = \
3189 DepPrioritySatisfiedRange.ignore_medium):
3190 self._dynamic_config._buildpkgonly_deps_unsatisfied = True
3191 self._dynamic_config._skip_restart = True
3192 return False, myfavorites
3194 # Any failures except those due to autounmask *alone* should return
3195 # before this point, since the success_without_autounmask flag that's
3196 # set below is reserved for cases where there are *zero* other
3197 # problems. For reference, see backtrack_depgraph, where it skips the
3198 # get_best_run() call when success_without_autounmask is True.
3200 digraph_nodes = self._dynamic_config.digraph.nodes
3202 if any(x in digraph_nodes for x in
3203 self._dynamic_config._needed_unstable_keywords) or \
3204 any(x in digraph_nodes for x in
3205 self._dynamic_config._needed_p_mask_changes) or \
3206 any(x in digraph_nodes for x in
3207 self._dynamic_config._needed_use_config_changes) or \
3208 any(x in digraph_nodes for x in
3209 self._dynamic_config._needed_license_changes) :
3210 #We failed if the user needs to change the configuration
3211 self._dynamic_config._success_without_autounmask = True
3212 return False, myfavorites
3214 # We're true here unless we are missing binaries.
3215 return (True, myfavorites)
3217 def _set_args(self, args):
3219 Create the "__non_set_args__" package set from atoms and packages given as
3220 arguments. This method can be called multiple times if necessary.
3221 The package selection cache is automatically invalidated, since
3222 arguments influence package selections.
# NOTE(review): docstring delimiters and a few statements (the
# `set_atoms` / `non_set_atoms` dict initializations around original
# 3223-3226, the `else:` at 3245/3247, and the refs-list setup at
# 3252-3253) are elided from this listing; confirm against the full file.
# Pass 1: reset per-root state and bucket argument atoms into
# set-sourced vs. non-set-sourced groups.
3227 for root in self._dynamic_config.sets:
3228 depgraph_sets = self._dynamic_config.sets[root]
3229 depgraph_sets.sets.setdefault('__non_set_args__',
3230 InternalPackageSet(allow_repo=True)).clear()
3231 depgraph_sets.atoms.clear()
3232 depgraph_sets.atom_arg_map.clear()
3233 set_atoms[root] = []
3234 non_set_atoms[root] = []
3236 # We don't add set args to the digraph here since that
3237 # happens at a later stage and we don't want to make
3238 # any state changes here that aren't reversed by a
3239 # another call to this method.
3240 for arg in self._expand_set_args(args, add_to_digraph=False):
3241 atom_arg_map = self._dynamic_config.sets[
3242 arg.root_config.root].atom_arg_map
3243 if isinstance(arg, SetArg):
3244 atom_group = set_atoms[arg.root_config.root]
3246 atom_group = non_set_atoms[arg.root_config.root]
3248 for atom in arg.pset.getAtoms():
3249 atom_group.append(atom)
3250 atom_key = (atom, arg.root_config.root)
3251 refs = atom_arg_map.get(atom_key)
3254 atom_arg_map[atom_key] = refs
# Pass 2: publish the collected atoms into the per-root depgraph sets.
3258 for root in self._dynamic_config.sets:
3259 depgraph_sets = self._dynamic_config.sets[root]
3260 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
3261 non_set_atoms.get(root, [])))
3262 depgraph_sets.sets['__non_set_args__'].update(
3263 non_set_atoms.get(root, []))
3265 # Invalidate the package selection cache, since
3266 # arguments influence package selections.
3267 self._dynamic_config._highest_pkg_cache.clear()
3268 for trees in self._dynamic_config._filtered_trees.values():
3269 trees["porttree"].dbapi._clear_cache()
3271 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
3273 Return a list of slot atoms corresponding to installed slots that
3274 differ from the slot of the highest visible match. When
3275 blocker_lookahead is True, slot atoms that would trigger a blocker
3276 conflict are automatically discarded, potentially allowing automatic
3277 uninstallation of older slots when appropriate.
# NOTE(review): docstring delimiters and several statements (the `slots`
# set initialization, early `return []` bodies, `continue` lines, the
# `try:` around _select_atoms, and the blockers/blocker_atoms setup)
# are elided from this listing per the embedded-number gaps.
3279 highest_pkg, in_graph = self._select_package(root_config.root, atom)
3280 if highest_pkg is None:
# Collect the installed slots for the same cp as the highest match.
3282 vardb = root_config.trees["vartree"].dbapi
3284 for cpv in vardb.match(atom):
3285 # don't mix new virtuals with old virtuals
3286 pkg = vardb._pkg_str(cpv, None)
3287 if pkg.cp == highest_pkg.cp:
3290 slots.add(highest_pkg.slot)
3294 slots.remove(highest_pkg.slot)
# For each remaining slot, keep the selected package only if it is an
# older version of the same cp (a distinct installed slot).
3297 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
3298 pkg, in_graph = self._select_package(root_config.root, slot_atom)
3299 if pkg is not None and \
3300 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
3301 greedy_pkgs.append(pkg)
3304 if not blocker_lookahead:
3305 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: compute each candidate's blocker atoms so slots
# that would conflict with the highest match can be discarded.
3308 blocker_dep_keys = Package._dep_keys
3309 for pkg in greedy_pkgs + [highest_pkg]:
3310 dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
3312 selected_atoms = self._select_atoms(
3313 pkg.root, dep_str, self._pkg_use_enabled(pkg),
3314 parent=pkg, strict=True)
3315 except portage.exception.InvalidDependString:
3318 for atoms in selected_atoms.values():
3319 blocker_atoms.extend(x for x in atoms if x.blocker)
3320 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
3322 if highest_pkg not in blockers:
3325 # filter packages with invalid deps
3326 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
3328 # filter packages that conflict with highest_pkg
3329 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
3330 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
3331 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
3336 # If two packages conflict, discard the lower version.
3337 discard_pkgs = set()
3338 greedy_pkgs.sort(reverse=True)
3339 for i in range(len(greedy_pkgs) - 1):
3340 pkg1 = greedy_pkgs[i]
3341 if pkg1 in discard_pkgs:
3343 for j in range(i + 1, len(greedy_pkgs)):
3344 pkg2 = greedy_pkgs[j]
3345 if pkg2 in discard_pkgs:
3347 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
3348 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
3350 discard_pkgs.add(pkg2)
3352 return [pkg.slot_atom for pkg in greedy_pkgs \
3353 if pkg not in discard_pkgs]
def _select_atoms_from_graph(self, *pargs, **kwargs):
	"""
	Prefer atoms matching packages that have already been
	added to the graph or those that are installed and have
	not been scheduled for replacement.
	"""
	# Route the selection through the graph-backed trees so that
	# packages already in the graph (or installed and not slated
	# for replacement) win the atom match.
	kwargs["trees"] = self._dynamic_config._graph_trees
	return self._select_atoms_highest_available(
		*pargs, **portage._native_kwargs(kwargs))
3365 def _select_atoms_highest_available(self, root, depstring,
3366 myuse=None, parent=None, strict=True, trees=None, priority=None):
3367 """This will raise InvalidDependString if necessary. If trees is
3368 None then self._dynamic_config._filtered_trees is used."""
# NOTE(review): this listing is elided (embedded numbering skips the
# `try:`/`finally:` framing around dep_check, the `eapi` assignment,
# several `else:`/`if not mycheck[0]:` lines, and `continue`s in the
# traversal loop). Code below is byte-identical to what is visible.
3370 if not isinstance(depstring, list):
3372 is_valid_flag = None
3373 if parent is not None:
3375 if not parent.installed:
3376 is_valid_flag = parent.iuse.is_valid_flag
3377 depstring = portage.dep.use_reduce(depstring,
3378 uselist=myuse, opconvert=True, token_class=Atom,
3379 is_valid_flag=is_valid_flag, eapi=eapi)
3381 if (self._dynamic_config.myparams.get(
3382 "ignore_built_slot_operator_deps", "n") == "y" and
3383 parent and parent.built):
3384 ignore_built_slot_operator_deps(depstring)
3386 pkgsettings = self._frozen_config.pkgsettings[root]
3388 trees = self._dynamic_config._filtered_trees
3389 mytrees = trees[root]
3390 atom_graph = digraph()
3392 # Temporarily disable autounmask so that || preferences
3393 # account for masking and USE settings.
3394 _autounmask_backup = self._dynamic_config._autounmask
3395 self._dynamic_config._autounmask = False
3396 # backup state for restoration, in case of recursive
3397 # calls to this method
3398 backup_state = mytrees.copy()
3400 # clear state from previous call, in case this
3401 # call is recursive (we have a backup, that we
3402 # will use to restore it later)
3403 mytrees.pop("pkg_use_enabled", None)
3404 mytrees.pop("parent", None)
3405 mytrees.pop("atom_graph", None)
3406 mytrees.pop("priority", None)
# Thread depgraph context into dep_check via the trees mapping — this
# is how dep_check's choice logic sees the current parent/priority.
3408 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
3409 if parent is not None:
3410 mytrees["parent"] = parent
3411 mytrees["atom_graph"] = atom_graph
3412 if priority is not None:
3413 mytrees["priority"] = priority
3415 mycheck = portage.dep_check(depstring, None,
3416 pkgsettings, myuse=myuse,
3417 myroot=root, trees=trees)
# Restore the pre-call state (autounmask flag and trees mapping) so
# recursive callers observe their own context again.
3420 self._dynamic_config._autounmask = _autounmask_backup
3421 mytrees.pop("pkg_use_enabled", None)
3422 mytrees.pop("parent", None)
3423 mytrees.pop("atom_graph", None)
3424 mytrees.pop("priority", None)
3425 mytrees.update(backup_state)
3427 raise portage.exception.InvalidDependString(mycheck[1])
3429 selected_atoms = mycheck[1]
3430 elif parent not in atom_graph:
3431 selected_atoms = {parent : mycheck[1]}
3433 # Recursively traversed virtual dependencies, and their
3434 # direct dependencies, are considered to have the same
3435 # depth as direct dependencies.
3436 if parent.depth is None:
3439 virt_depth = parent.depth + 1
3440 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
3441 selected_atoms = OrderedDict()
# Walk the atom_graph that dep_check populated, flattening virtual
# packages and their children into the selected_atoms mapping.
3442 node_stack = [(parent, None, None)]
3443 traversed_nodes = set()
3445 node, node_parent, parent_atom = node_stack.pop()
3446 traversed_nodes.add(node)
3450 if node_parent is parent:
3451 if priority is None:
3452 node_priority = None
3454 node_priority = priority.copy()
3456 # virtuals only have runtime deps
3457 node_priority = self._priority(runtime=True)
3459 k = Dependency(atom=parent_atom,
3460 blocker=parent_atom.blocker, child=node,
3461 depth=virt_depth, parent=node_parent,
3462 priority=node_priority, root=node.root)
3465 selected_atoms[k] = child_atoms
3466 for atom_node in atom_graph.child_nodes(node):
3467 child_atom = atom_node[0]
3468 if id(child_atom) not in chosen_atom_ids:
3470 child_atoms.append(child_atom)
3471 for child_node in atom_graph.child_nodes(atom_node):
3472 if child_node in traversed_nodes:
3474 if not portage.match_from_list(
3475 child_atom, [child_node]):
3476 # Typically this means that the atom
3477 # specifies USE deps that are unsatisfied
3478 # by the selected package. The caller will
3479 # record this as an unsatisfied dependency
3482 node_stack.append((child_node, node, child_atom))
3484 return selected_atoms
3486 def _expand_virt_from_graph(self, root, atom):
# Generator: expand a virtual atom into the concrete atoms of its
# RDEPEND as resolved against the graph. NOTE(review): the elided
# lines (3488, 3491-3494, 3496-3498, 3508-3510, 3512, 3516-3517 per
# the embedded numbering) appear to carry the `yield`/`return`/
# `continue` statements — confirm against the full file; as shown the
# body is incomplete.
3487 if not isinstance(atom, Atom):
3489 graphdb = self._dynamic_config.mydbapi[root]
3490 match = graphdb.match_pkgs(atom)
# Only expand genuine virtual/* packages; anything else passes through.
3495 if not pkg.cpv.startswith("virtual/"):
3499 rdepend = self._select_atoms_from_graph(
3500 pkg.root, pkg._metadata.get("RDEPEND", ""),
3501 myuse=self._pkg_use_enabled(pkg),
3502 parent=pkg, strict=False)
3503 except InvalidDependString as e:
3504 writemsg_level("!!! Invalid RDEPEND in " + \
3505 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3506 (pkg.root, pkg.cpv, e),
3507 noiselevel=-1, level=logging.ERROR)
3511 for atoms in rdepend.values():
3513 if hasattr(atom, "_orig_atom"):
3514 # Ignore virtual atoms since we're only
3515 # interested in expanding the real atoms.
3519 def _virt_deps_visible(self, pkg, ignore_use=False):
3521 Assumes pkg is a virtual package. Traverses virtual deps recursively
3522 and returns True if all deps are visible, False otherwise. This is
3523 useful for checking if it will be necessary to expand virtual slots,
3524 for cases like bug #382557.
# NOTE(review): docstring delimiters, the `try:` line, the early
# `return` statements, and the inner atom loop header are elided from
# this listing (embedded numbering skips 3520, 3525-3526, 3533,
# 3538-3539, 3541-3542, 3545, 3547-3549).
3527 rdepend = self._select_atoms(
3528 pkg.root, pkg._metadata.get("RDEPEND", ""),
3529 myuse=self._pkg_use_enabled(pkg),
3530 parent=pkg, priority=self._priority(runtime=True))
3531 except InvalidDependString as e:
# Installed packages with broken RDEPEND are tolerated (warn only);
# for non-installed packages the dep string must be valid.
3532 if not pkg.installed:
3534 writemsg_level("!!! Invalid RDEPEND in " + \
3535 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3536 (pkg.root, pkg.cpv, e),
3537 noiselevel=-1, level=logging.ERROR)
3540 for atoms in rdepend.values():
# ignore_use path: strip USE deps before re-selecting the package.
3543 atom = atom.without_use
3544 pkg, existing = self._select_package(
3546 if pkg is None or not self._pkg_visibility_check(pkg):
3551 def _get_dep_chain(self, start_node, target_atom=None,
3552 unsatisfied_dependency=False):
3554 Returns a list of (atom, node_type) pairs that represent a dep chain.
3555 If target_atom is None, the first package shown is pkg's parent.
3556 If target_atom is not None the first package shown is pkg.
3557 If unsatisfied_dependency is True, the first parent is selected whose
3558 dependency is not satisfied by 'pkg'. This is needed for USE changes.
3559 (Does not support target_atom.)
# NOTE(review): this listing is elided — docstring delimiters, the
# `dep_chain`/`node`/`child`/`usedep`/`dep_strings`/`parent_arg`/
# `parent_merge` initializations, `try:` lines, `break`/`continue`
# statements and the final `return dep_chain` are missing per the
# embedded-number gaps. Code kept byte-identical.
3561 traversed_nodes = set()
3565 all_parents = self._dynamic_config._parent_atoms
3566 graph = self._dynamic_config.digraph
3567 verbose_main_repo_display = "--verbose-main-repo-display" in \
3568 self._frozen_config.myopts
# Local helper: render "cpv" or "cpv::repo" depending on display opts.
3570 def format_pkg(pkg):
3571 pkg_name = "%s" % (pkg.cpv,)
3572 if verbose_main_repo_display or pkg.repo != \
3573 pkg.root_config.settings.repositories.mainRepo().name:
3574 pkg_name += _repo_separator + pkg.repo
# target_atom mode: the start node itself heads the chain, annotated
# with the USE flags that affect the target atom.
3577 if target_atom is not None and isinstance(node, Package):
3578 affecting_use = set()
3579 for dep_str in Package._dep_keys:
3581 affecting_use.update(extract_affecting_use(
3582 node._metadata[dep_str], target_atom,
3584 except InvalidDependString:
3585 if not node.installed:
3587 affecting_use.difference_update(node.use.mask, node.use.force)
3588 pkg_name = format_pkg(node)
3592 for flag in affecting_use:
3593 if flag in self._pkg_use_enabled(node):
3596 usedep.append("-"+flag)
3597 pkg_name += "[%s]" % ",".join(usedep)
3599 dep_chain.append((pkg_name, node.type_name))
3602 # To build a dep chain for the given package we take
3603 # "random" parents form the digraph, except for the
3604 # first package, because we want a parent that forced
3605 # the corresponding change (i.e '>=foo-2', instead 'foo').
3607 traversed_nodes.add(start_node)
3609 start_node_parent_atoms = {}
3610 for ppkg, patom in all_parents.get(node, []):
3611 # Get a list of suitable atoms. For use deps
3612 # (aka unsatisfied_dependency is not None) we
3613 # need that the start_node doesn't match the atom.
3614 if not unsatisfied_dependency or \
3615 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
3616 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
3618 if start_node_parent_atoms:
3619 # If there are parents in all_parents then use one of them.
3620 # If not, then this package got pulled in by an Arg and
3621 # will be correctly handled by the code that handles later
3622 # packages in the dep chain.
3623 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
3626 for ppkg in start_node_parent_atoms[best_match]:
3628 if ppkg in self._dynamic_config._initial_arg_list:
3629 # Stop if reached the top level of the dep chain.
# Main walk: climb from the start node toward a root argument,
# appending one formatted entry per traversed node.
3632 while node is not None:
3633 traversed_nodes.add(node)
3635 if node not in graph:
3636 # The parent is not in the graph due to backtracking.
3639 elif isinstance(node, DependencyArg):
3640 if graph.parent_nodes(node):
3643 node_type = "argument"
3644 dep_chain.append(("%s" % (node,), node_type))
3646 elif node is not start_node:
3647 for ppkg, patom in all_parents[child]:
3649 if child is start_node and unsatisfied_dependency and \
3650 InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
3651 # This atom is satisfied by child, there must be another atom.
3653 atom = patom.unevaluated_atom
# Collect the dep strings that could carry this edge's atom, either
# from recorded edge priorities or (fallback) all dep keys.
3657 priorities = graph.nodes[node][0].get(child)
3658 if priorities is None:
3659 # This edge comes from _parent_atoms and was not added to
3660 # the graph, and _parent_atoms does not contain priorities.
3661 for k in Package._dep_keys:
3662 dep_strings.add(node._metadata[k])
3664 for priority in priorities:
3665 if priority.buildtime:
3666 for k in Package._buildtime_keys:
3667 dep_strings.add(node._metadata[k])
3668 if priority.runtime:
3669 dep_strings.add(node._metadata["RDEPEND"])
3670 if priority.runtime_post:
3671 dep_strings.add(node._metadata["PDEPEND"])
3673 affecting_use = set()
3674 for dep_str in dep_strings:
3676 affecting_use.update(extract_affecting_use(
3677 dep_str, atom, eapi=node.eapi))
3678 except InvalidDependString:
3679 if not node.installed:
3682 #Don't show flags as 'affecting' if the user can't change them,
3683 affecting_use.difference_update(node.use.mask, \
3686 pkg_name = format_pkg(node)
3689 for flag in affecting_use:
3690 if flag in self._pkg_use_enabled(node):
3693 usedep.append("-"+flag)
3694 pkg_name += "[%s]" % ",".join(usedep)
3696 dep_chain.append((pkg_name, node.type_name))
3698 # When traversing to parents, prefer arguments over packages
3699 # since arguments are root nodes. Never traverse the same
3700 # package twice, in order to prevent an infinite loop.
3702 selected_parent = None
3705 parent_unsatisfied = None
3707 for parent in self._dynamic_config.digraph.parent_nodes(node):
3708 if parent in traversed_nodes:
3710 if isinstance(parent, DependencyArg):
3713 if isinstance(parent, Package) and \
3714 parent.operation == "merge":
3715 parent_merge = parent
3716 if unsatisfied_dependency and node is start_node:
3717 # Make sure that pkg doesn't satisfy parent's dependency.
3718 # This ensures that we select the correct parent for use
3720 for ppkg, atom in all_parents[start_node]:
3722 atom_set = InternalPackageSet(initial_atoms=(atom,))
3723 if not atom_set.findAtomForPackage(start_node):
3724 parent_unsatisfied = parent
3727 selected_parent = parent
# Parent preference order: unsatisfied > merge-list > argument.
3729 if parent_unsatisfied is not None:
3730 selected_parent = parent_unsatisfied
3731 elif parent_merge is not None:
3732 # Prefer parent in the merge list (bug #354747).
3733 selected_parent = parent_merge
3734 elif parent_arg is not None:
3735 if self._dynamic_config.digraph.parent_nodes(parent_arg):
3736 selected_parent = parent_arg
3738 dep_chain.append(("%s" % (parent_arg,), "argument"))
3739 selected_parent = None
3741 node = selected_parent
3744 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
# Render the dep chain for pkg as a "#"-prefixed comment block, one
# "required by ..." line per chain entry (used when writing autounmask
# suggestions to config files).
# NOTE(review): the `display_list` initialization (3746), the `else:`
# (3750) and the trailing `return msg` are elided from this listing.
3745 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
3747 for node, node_type in dep_chain:
3748 if node_type == "argument":
3749 display_list.append("required by %s (argument)" % node)
3751 display_list.append("required by %s" % node)
3753 msg = "# " + "\n# ".join(display_list) + "\n"
3757 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
3758 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
3760 When check_backtrack=True, no output is produced and
3761 the method either returns or raises _backtrack_mask if
3762 a matching package has been masked by backtracking.
3764 backtrack_mask = False
3765 autounmask_broke_use_dep = False
3766 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
3768 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
3770 xinfo = '"%s"' % atom.unevaluated_atom
3773 if isinstance(myparent, AtomArg):
3774 xinfo = '"%s"' % (myparent,)
3775 # Discard null/ from failed cpv_expand category expansion.
3776 xinfo = xinfo.replace("null/", "")
3777 if root != self._frozen_config._running_root.root:
3778 xinfo = "%s for %s" % (xinfo, root)
3779 masked_packages = []
3781 missing_use_adjustable = set()
3782 required_use_unsatisfied = []
3783 masked_pkg_instances = set()
3784 have_eapi_mask = False
3785 pkgsettings = self._frozen_config.pkgsettings[root]
3786 root_config = self._frozen_config.roots[root]
3787 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3788 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3789 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
3790 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3791 for db, pkg_type, built, installed, db_keys in dbs:
3794 if hasattr(db, "xmatch"):
3795 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
3797 cpv_list = db.match(atom.without_use)
3799 if atom.repo is None and hasattr(db, "getRepositories"):
3800 repo_list = db.getRepositories()
3802 repo_list = [atom.repo]
3806 for cpv in cpv_list:
3807 for repo in repo_list:
3808 if not db.cpv_exists(cpv, myrepo=repo):
3811 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
3812 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
3813 if metadata is not None and \
3814 portage.eapi_is_supported(metadata["EAPI"]):
3816 repo = metadata.get('repository')
3817 pkg = self._pkg(cpv, pkg_type, root_config,
3818 installed=installed, myrepo=repo)
3819 # pkg._metadata contains calculated USE for ebuilds,
3820 # required later for getMissingLicenses.
3821 metadata = pkg._metadata
3823 # Avoid doing any operations with packages that
3824 # have invalid metadata. It would be unsafe at
3825 # least because it could trigger unhandled
3826 # exceptions in places like check_required_use().
3827 masked_packages.append(
3828 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3830 if not atom_set.findAtomForPackage(pkg,
3831 modified_use=self._pkg_use_enabled(pkg)):
3833 if pkg in self._dynamic_config._runtime_pkg_mask:
3834 backtrack_reasons = \
3835 self._dynamic_config._runtime_pkg_mask[pkg]
3836 mreasons.append('backtracking: %s' % \
3837 ', '.join(sorted(backtrack_reasons)))
3838 backtrack_mask = True
3839 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3840 modified_use=self._pkg_use_enabled(pkg)):
3841 mreasons = ["exclude option"]
3843 masked_pkg_instances.add(pkg)
3844 if atom.unevaluated_atom.use:
3846 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3847 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3848 missing_use.append(pkg)
3849 if atom_set_with_use.findAtomForPackage(pkg):
3850 autounmask_broke_use_dep = True
3854 writemsg("violated_conditionals raised " + \
3855 "InvalidAtom: '%s' parent: %s" % \
3856 (atom, myparent), noiselevel=-1)
3858 if not mreasons and \
3860 pkg._metadata.get("REQUIRED_USE") and \
3861 eapi_has_required_use(pkg.eapi):
3862 if not check_required_use(
3863 pkg._metadata["REQUIRED_USE"],
3864 self._pkg_use_enabled(pkg),
3865 pkg.iuse.is_valid_flag,
3867 required_use_unsatisfied.append(pkg)
3869 root_slot = (pkg.root, pkg.slot_atom)
3870 if pkg.built and root_slot in self._rebuild.rebuild_list:
3871 mreasons = ["need to rebuild from source"]
3872 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3873 mreasons = ["need to rebuild from source"]
3874 elif pkg.built and not mreasons:
3875 mreasons = ["use flag configuration mismatch"]
3876 masked_packages.append(
3877 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3881 raise self._backtrack_mask()
3885 if check_autounmask_breakage:
3886 if autounmask_broke_use_dep:
3887 raise self._autounmask_breakage()
3891 missing_use_reasons = []
3892 missing_iuse_reasons = []
3893 for pkg in missing_use:
3894 use = self._pkg_use_enabled(pkg)
3896 #Use the unevaluated atom here, because some flags might have gone
3897 #lost during evaluation.
3898 required_flags = atom.unevaluated_atom.use.required
3899 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3903 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3904 missing_iuse_reasons.append((pkg, mreasons))
3906 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3907 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3909 untouchable_flags = \
3910 frozenset(chain(pkg.use.mask, pkg.use.force))
3911 if any(x in untouchable_flags for x in
3912 chain(need_enable, need_disable)):
3915 missing_use_adjustable.add(pkg)
3916 required_use = pkg._metadata.get("REQUIRED_USE")
3917 required_use_warning = ""
3919 old_use = self._pkg_use_enabled(pkg)
3920 new_use = set(self._pkg_use_enabled(pkg))
3921 for flag in need_enable:
3923 for flag in need_disable:
3924 new_use.discard(flag)
3925 if check_required_use(required_use, old_use,
3926 pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
3927 and not check_required_use(required_use, new_use,
3928 pkg.iuse.is_valid_flag, eapi=pkg.eapi):
3929 required_use_warning = ", this change violates use flag constraints " + \
3930 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3932 if need_enable or need_disable:
3934 changes.extend(colorize("red", "+" + x) \
3935 for x in need_enable)
3936 changes.extend(colorize("blue", "-" + x) \
3937 for x in need_disable)
3938 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3939 missing_use_reasons.append((pkg, mreasons))
3941 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3942 # Lets see if the violated use deps are conditional.
3943 # If so, suggest to change them on the parent.
3945 # If the child package is masked then a change to
3946 # parent USE is not a valid solution (a normal mask
3947 # message should be displayed instead).
3948 if pkg in masked_pkg_instances:
3952 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3953 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3954 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3955 #all violated use deps are conditional
3957 conditional = violated_atom.use.conditional
3958 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3959 conditional.enabled, conditional.disabled))
3961 untouchable_flags = \
3962 frozenset(chain(myparent.use.mask, myparent.use.force))
3963 if any(x in untouchable_flags for x in involved_flags):
3966 required_use = myparent._metadata.get("REQUIRED_USE")
3967 required_use_warning = ""
3969 old_use = self._pkg_use_enabled(myparent)
3970 new_use = set(self._pkg_use_enabled(myparent))
3971 for flag in involved_flags:
3973 new_use.discard(flag)
3976 if check_required_use(required_use, old_use,
3977 myparent.iuse.is_valid_flag,
3978 eapi=myparent.eapi) and \
3979 not check_required_use(required_use, new_use,
3980 myparent.iuse.is_valid_flag,
3981 eapi=myparent.eapi):
3982 required_use_warning = ", this change violates use flag constraints " + \
3983 "defined by %s: '%s'" % (myparent.cpv, \
3984 human_readable_required_use(required_use))
3986 for flag in involved_flags:
3987 if flag in self._pkg_use_enabled(myparent):
3988 changes.append(colorize("blue", "-" + flag))
3990 changes.append(colorize("red", "+" + flag))
3991 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3992 if (myparent, mreasons) not in missing_use_reasons:
3993 missing_use_reasons.append((myparent, mreasons))
3995 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3996 in missing_use_reasons if pkg not in masked_pkg_instances]
3998 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3999 in missing_iuse_reasons if pkg not in masked_pkg_instances]
4001 show_missing_use = False
4002 if unmasked_use_reasons:
4003 # Only show the latest version.
4004 show_missing_use = []
4006 parent_reason = None
4007 for pkg, mreasons in unmasked_use_reasons:
4009 if parent_reason is None:
4010 #This happens if a use change on the parent
4011 #leads to a satisfied conditional use dep.
4012 parent_reason = (pkg, mreasons)
4013 elif pkg_reason is None:
4014 #Don't rely on the first pkg in unmasked_use_reasons,
4015 #being the highest version of the dependency.
4016 pkg_reason = (pkg, mreasons)
4018 show_missing_use.append(pkg_reason)
4020 show_missing_use.append(parent_reason)
4022 elif unmasked_iuse_reasons:
4023 masked_with_iuse = False
4024 for pkg in masked_pkg_instances:
4025 #Use atom.unevaluated here, because some flags might have gone
4026 #lost during evaluation.
4027 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
4028 # Package(s) with required IUSE are masked,
4029 # so display a normal masking message.
4030 masked_with_iuse = True
4032 if not masked_with_iuse:
4033 show_missing_use = unmasked_iuse_reasons
4035 if required_use_unsatisfied:
4036 # If there's a higher unmasked version in missing_use_adjustable
4037 # then we want to show that instead.
4038 for pkg in missing_use_adjustable:
4039 if pkg not in masked_pkg_instances and \
4040 pkg > required_use_unsatisfied[0]:
4041 required_use_unsatisfied = False
4046 if show_req_use is None and required_use_unsatisfied:
4047 # We have an unmasked package that only requires USE adjustment
4048 # in order to satisfy REQUIRED_USE, and nothing more. We assume
4049 # that the user wants the latest version, so only the first
4050 # instance is displayed.
4051 show_req_use = required_use_unsatisfied[0]
4053 if show_req_use is not None:
4056 output_cpv = pkg.cpv + _repo_separator + pkg.repo
4057 writemsg("\n!!! " + \
4058 colorize("BAD", "The ebuild selected to satisfy ") + \
4059 colorize("INFORM", xinfo) + \
4060 colorize("BAD", " has unmet requirements.") + "\n",
4062 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
4063 writemsg("- %s %s\n" % (output_cpv, use_display),
4065 writemsg("\n The following REQUIRED_USE flag constraints " + \
4066 "are unsatisfied:\n", noiselevel=-1)
4067 reduced_noise = check_required_use(
4068 pkg._metadata["REQUIRED_USE"],
4069 self._pkg_use_enabled(pkg),
4070 pkg.iuse.is_valid_flag,
4071 eapi=pkg.eapi).tounicode()
4072 writemsg(" %s\n" % \
4073 human_readable_required_use(reduced_noise),
4075 normalized_required_use = \
4076 " ".join(pkg._metadata["REQUIRED_USE"].split())
4077 if reduced_noise != normalized_required_use:
4078 writemsg("\n The above constraints " + \
4079 "are a subset of the following complete expression:\n",
4081 writemsg(" %s\n" % \
4082 human_readable_required_use(normalized_required_use),
4084 writemsg("\n", noiselevel=-1)
4086 elif show_missing_use:
4087 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
4088 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
4089 for pkg, mreasons in show_missing_use:
4090 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
4092 elif masked_packages:
4093 writemsg("\n!!! " + \
4094 colorize("BAD", "All ebuilds that could satisfy ") + \
4095 colorize("INFORM", xinfo) + \
4096 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
4097 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
4098 have_eapi_mask = show_masked_packages(masked_packages)
4100 writemsg("\n", noiselevel=-1)
4101 msg = ("The current version of portage supports " + \
4102 "EAPI '%s'. You must upgrade to a newer version" + \
4103 " of portage before EAPI masked packages can" + \
4104 " be installed.") % portage.const.EAPI
4105 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
4106 writemsg("\n", noiselevel=-1)
4110 if not atom.cp.startswith("null/"):
4111 for pkg in self._iter_match_pkgs_any(
4112 root_config, Atom(atom.cp)):
4116 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
4117 if isinstance(myparent, AtomArg) and \
4119 self._frozen_config.myopts.get(
4120 "--misspell-suggestions", "y") != "n":
4122 writemsg("\nemerge: searching for similar names..."
4126 if "--usepkgonly" not in self._frozen_config.myopts:
4128 if "--usepkg" in self._frozen_config.myopts:
4131 matches = similar_name_search(dbs, atom)
4133 if len(matches) == 1:
4134 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
4136 elif len(matches) > 1:
4138 "\nemerge: Maybe you meant any of these: %s?\n" % \
4139 (", ".join(matches),), noiselevel=-1)
4141 # Generally, this would only happen if
4142 # all dbapis are empty.
4143 writemsg(" nothing similar found.\n"
4146 if not isinstance(myparent, AtomArg):
4147 # It's redundant to show parent for AtomArg since
4148 # it's the same as 'xinfo' displayed above.
4149 dep_chain = self._get_dep_chain(myparent, atom)
4150 for node, node_type in dep_chain:
4151 msg.append('(dependency required by "%s" [%s])' % \
4152 (colorize('INFORM', "%s" % (node)), node_type))
4155 writemsg("\n".join(msg), noiselevel=-1)
4156 writemsg("\n", noiselevel=-1)
4160 writemsg("\n", noiselevel=-1)
4162 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
# Iterate matches of `atom` across every (db, pkg_type, ...) entry
# registered in _filtered_trees for this root, delegating the actual
# matching to _iter_match_pkgs. The built/installed/db_keys tuple
# members are unpacked but not used here.
# NOTE(review): the inner loop body (presumably "yield pkg") is elided
# from this numbered listing — confirm against the full source file.
4163 for db, pkg_type, built, installed, db_keys in \
4164 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
4165 for pkg in self._iter_match_pkgs(root_config,
4166 pkg_type, atom, onlydeps=onlydeps):
4169 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
# (The four lines below are the original docstring body; the enclosing
# triple-quote lines are elided from this numbered listing.)
4171 Iterate over Package instances of pkg_type matching the given atom.
4172 This does not check visibility and it also does not match USE for
4173 unbuilt ebuilds since USE are lazily calculated after visibility
4174 checks (to avoid the expense when possible).
# Resolve the dbapi for this package type and expand the (possibly
# category-less) atom against it before listing candidate cpvs.
4177 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
4178 atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
4179 cp_list = db.cp_list(atom_exp.cp)
4180 matched_something = False
4181 installed = pkg_type == 'installed'
4184 atom_set = InternalPackageSet(initial_atoms=(atom,),
# Prefer the atom's explicit repo when given; otherwise consider every
# repository the db knows about (multi-repo dbapis expose
# getRepositories()).
4186 if atom.repo is None and hasattr(db, "getRepositories"):
4187 repo_list = db.getRepositories()
4189 repo_list = [atom.repo]
# NOTE(review): the loop over cp_list (and its reversed/sorted setup)
# is elided from this listing — the checks below run once per cpv.
4194 # Call match_from_list on one cpv at a time, in order
4195 # to avoid unnecessary match_from_list comparisons on
4196 # versions that are never yielded from this method.
4197 if not match_from_list(atom_exp, [cpv]):
4199 for repo in repo_list:
# Instantiate (or fetch from cache) the Package for this cpv/repo;
# a missing package in this particular repo is not an error.
4202 pkg = self._pkg(cpv, pkg_type, root_config,
4203 installed=installed, onlydeps=onlydeps, myrepo=repo)
4204 except portage.exception.PackageNotFound:
4207 # A cpv can be returned from dbapi.match() as an
4208 # old-style virtual match even in cases when the
4209 # package does not actually PROVIDE the virtual.
4210 # Filter out any such false matches here.
4212 # Make sure that cpv from the current repo satisfies the atom.
4213 # This might not be the case if there are several repos with
4214 # the same cpv, but different metadata keys, like SLOT.
4215 # Also, parts of the match that require metadata access
4216 # are deferred until we have cached the metadata in a
4218 if not atom_set.findAtomForPackage(pkg,
4219 modified_use=self._pkg_use_enabled(pkg)):
4221 matched_something = True
4224 # USE=multislot can make an installed package appear as if
4225 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
4226 # won't do any good as long as USE=multislot is enabled since
4227 # the newly built package still won't have the expected slot.
4228 # Therefore, assume that such SLOT dependencies are already
4229 # satisfied rather than forcing a rebuild.
4230 if not matched_something and installed and \
4231 atom.slot is not None and not atom.slot_operator_built:
# Fallback path: look for the real SLOT in other dbs, then match the
# installed package against the slot-less atom.
4233 if "remove" in self._dynamic_config.myparams:
4234 # We need to search the portdbapi, which is not in our
4235 # normal dbs list, in order to find the real SLOT.
4236 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
4237 db_keys = list(portdb._aux_cache_keys)
4238 dbs = [(portdb, "ebuild", False, False, db_keys)]
4240 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
4242 cp_list = db.cp_list(atom_exp.cp)
# Re-match using the atom with its slot stripped.
4244 atom_set = InternalPackageSet(
4245 initial_atoms=(atom.without_slot,), allow_repo=True)
4246 atom_exp_without_slot = atom_exp.without_slot
4249 if not match_from_list(atom_exp_without_slot, [cpv]):
# If any other db provides this cpv in the requested slot, the slot
# dep is considered satisfiable elsewhere and we do not fall back.
4251 slot_available = False
4252 for other_db, other_type, other_built, \
4253 other_installed, other_keys in dbs:
4255 if portage.dep._match_slot(atom,
4256 other_db._pkg_str(_unicode(cpv), None)):
4257 slot_available = True
4259 except (KeyError, InvalidData):
4261 if not slot_available:
4263 inst_pkg = self._pkg(cpv, "installed",
4264 root_config, installed=installed, myrepo=atom.repo)
4265 # Remove the slot from the atom and verify that
4266 # the package matches the resulting atom.
4267 if atom_set.findAtomForPackage(inst_pkg):
4271 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing front-end for _select_pkg_highest_available_imp. Results
# are cached per (root, atom, unevaluated atom, onlydeps, autounmask
# state) so repeated resolution of the same dependency is cheap.
# NOTE(review): the cache-hit early return and the unpacking of `ret`
# into `pkg` are elided from this listing.
4272 cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
4273 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
4276 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
4277 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Record visible, unmasked selections in the per-root visible-package
# set; installed packages that carry masks are excluded.
4280 if self._pkg_visibility_check(pkg) and \
4281 not (pkg.installed and pkg.masks):
4282 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
4285 def _want_installed_pkg(self, pkg):
# (The three lines below are the original docstring body; the enclosing
# triple-quote lines are elided from this numbered listing.)
4287 Given an installed package returned from select_pkg, return
4288 True if the user has not explicitly requested for this package
4289 to be replaced (typically via an atom on the command line).
# Packages matched by --exclude are always kept as-installed (they are
# never replacement candidates).
4291 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
4292 modified_use=self._pkg_use_enabled(pkg)):
# An argument atom flagged force_reinstall means the user explicitly
# asked for a reinstall, so the installed instance is not wanted.
# NOTE(review): the enclosing try:, the returns, and the except body
# are elided from this listing.
4297 for arg, atom in self._iter_atoms_for_pkg(pkg):
4298 if arg.force_reinstall:
4300 except InvalidDependString:
# In "selective" mode, keeping the installed package is acceptable.
4303 if "selective" in self._dynamic_config.myparams:
4308 def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
# Report whether an ebuild equivalent to `pkg` (same cpv) is visible.
# First try the exact repo via self._pkg; if that raises
# PackageNotFound, fall back to scanning all ebuild repos for any
# visible =cpv match.
# NOTE(review): the try: line and the call to self._pkg preceding
# line 4311, plus the return statements, are elided from this listing.
4311 pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
4312 except portage.exception.PackageNotFound:
4313 pkg_eb_visible = False
4314 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
4315 "ebuild", Atom("=%s" % (pkg.cpv,))):
4316 if self._pkg_visibility_check(pkg_eb, autounmask_level):
4317 pkg_eb_visible = True
4319 if not pkg_eb_visible:
# Same-repo ebuild was found without an exception: check its
# visibility directly.
4322 if not self._pkg_visibility_check(pkg_eb, autounmask_level):
4327 def _equiv_binary_installed(self, pkg):
# True when the installed instance of pkg.cpv has the same BUILD_TIME
# as this package, i.e. the binary corresponds to the exact build that
# is already installed.
# NOTE(review): the try: line and the PackageNotFound handler body are
# elided from this listing.
4328 build_time = pkg.build_time
4333 inst_pkg = self._pkg(pkg.cpv, "installed",
4334 pkg.root_config, installed=True)
4335 except PackageNotFound:
4338 return build_time == inst_pkg.build_time
4340 class _AutounmaskLevel(object):
# Small mutable record describing which categories of autounmask
# change are permitted at a given escalation level. All flags start
# out False and are toggled progressively by _autounmask_levels().
# Uses __slots__ since many short-lived instances may be created.
4341 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
4342 "allow_missing_keywords", "allow_unmasks")
# NOTE(review): the "def __init__(self):" line is elided from this
# numbered listing; the assignments below are its body.
4345 self.allow_use_changes = False
4346 self.allow_license_changes = False
4347 self.allow_unstable_keywords = False
4348 self.allow_missing_keywords = False
4349 self.allow_unmasks = False
4351 def _autounmask_levels(self):
# (The lines below are the original docstring body; the enclosing
# triple-quote lines and step 1 of the list are elided from this
# numbered listing.)
4353 Iterate over the different allowed things to unmask.
4357 2. USE + ~arch + license
4358 3. USE + ~arch + license + missing keywords
4359 4. USE + license + masks
4360 5. USE + ~arch + license + masks
4361 6. USE + ~arch + license + missing keywords + masks
4364 * Do least invasive changes first.
4365 * Try unmasking alone before unmasking + missing keywords
4366 to avoid -9999 versions if possible
# Generator yields nothing unless autounmask is fully enabled.
4369 if self._dynamic_config._autounmask is not True:
4372 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
4373 autounmask_level = self._AutounmaskLevel()
# NOTE(review): one _AutounmaskLevel instance is mutated in place and
# re-yielded at each step; consumers must not retain earlier yields
# expecting them to stay unchanged.
4375 autounmask_level.allow_use_changes = True
4376 yield autounmask_level
4378 autounmask_level.allow_license_changes = True
4379 yield autounmask_level
4381 autounmask_level.allow_unstable_keywords = True
4382 yield autounmask_level
# The remaining, more invasive levels are skipped entirely when the
# user asked to keep package.mask entries in force.
4384 if not autounmask_keep_masks:
4386 autounmask_level.allow_missing_keywords = True
4387 yield autounmask_level
4389 # 4. USE + license + masks
4390 # Try to respect keywords while discarding
4391 # package.mask (see bug #463394).
4392 autounmask_level.allow_unstable_keywords = False
4393 autounmask_level.allow_missing_keywords = False
4394 autounmask_level.allow_unmasks = True
4395 yield autounmask_level
# Levels 5 and 6: unmasks combined with ~arch, then with missing
# keywords as the most invasive step.
4397 autounmask_level.allow_unstable_keywords = True
4399 for missing_keyword, unmask in ((False, True), (True, True)):
4401 autounmask_level.allow_missing_keywords = missing_keyword
4402 autounmask_level.allow_unmasks = unmask
4404 yield autounmask_level
4407 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Select a package for `atom`, retrying with progressively more
# invasive autounmask levels when the initial selection is unusable
# (not visible, or an installed package the user wants replaced).
4408 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the autounmask-free selection so we can fall back to it.
4410 default_selection = (pkg, existing)
4412 if self._dynamic_config._autounmask is True:
# NOTE(review): an additional visibility condition between lines 4413
# and 4415 is elided from this listing.
4413 if pkg is not None and \
4415 not self._want_installed_pkg(pkg):
4418 # Temporarily reset _need_restart state, in order to
4419 # avoid interference as reported in bug #459832.
4420 earlier_need_restart = self._dynamic_config._need_restart
4421 self._dynamic_config._need_restart = False
# Escalate through autounmask levels until a usable package appears
# or a restart becomes necessary.
4423 for autounmask_level in self._autounmask_levels():
4428 self._wrapped_select_pkg_highest_available_imp(
4429 root, atom, onlydeps=onlydeps,
4430 autounmask_level=autounmask_level)
4432 if pkg is not None and \
4434 not self._want_installed_pkg(pkg):
4437 if self._dynamic_config._need_restart:
# Restore the restart flag that was cleared before the retry loop.
4440 if earlier_need_restart:
4441 self._dynamic_config._need_restart = True
4444 # This ensures that we can fall back to an installed package
4445 # that may have been rejected in the autounmask path above.
4446 return default_selection
4448 return pkg, existing
4450 def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
# Check whether `pkg` is visible, optionally recording the autounmask
# changes (keywords, package.mask, licenses) that would make it
# visible at the given autounmask_level.
# NOTE(review): several return statements (early returns and the final
# success return) are elided from this numbered listing.
4455 if trust_graph and pkg in self._dynamic_config.digraph:
4456 # Sometimes we need to temporarily disable
4457 # dynamic_config._autounmask, but for overall
4458 # consistency in dependency resolution, in most
4459 # cases we want to treat packages in the graph
4460 # as though they are visible.
# Without autounmask (or without a level to apply) nothing further can
# be done to make a masked package visible.
4463 if not self._dynamic_config._autounmask or autounmask_level is None:
4466 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4467 root_config = self._frozen_config.roots[pkg.root]
4468 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each masking reason by its unmask hint.
4470 masked_by_unstable_keywords = False
4471 masked_by_missing_keywords = False
4472 missing_licenses = None
4473 masked_by_something_else = False
4474 masked_by_p_mask = False
4476 for reason in mreasons:
4477 hint = reason.unmask_hint
4480 masked_by_something_else = True
4481 elif hint.key == "unstable keyword":
4482 masked_by_unstable_keywords = True
# A "**" keyword hint means the keyword is missing entirely, which is
# a stronger (more invasive) change than accepting ~arch.
4483 if hint.value == "**":
4484 masked_by_missing_keywords = True
4485 elif hint.key == "p_mask":
4486 masked_by_p_mask = True
4487 elif hint.key == "license":
4488 missing_licenses = hint.value
# Unknown hint keys cannot be autounmasked.
4490 masked_by_something_else = True
4492 if masked_by_something_else:
# Drop masks that previous autounmask decisions already lifted.
4495 if pkg in self._dynamic_config._needed_unstable_keywords:
4496 #If the package is already keyworded, remove the mask.
4497 masked_by_unstable_keywords = False
4498 masked_by_missing_keywords = False
4500 if pkg in self._dynamic_config._needed_p_mask_changes:
4501 #If the package is already keyworded, remove the mask.
4502 masked_by_p_mask = False
4504 if missing_licenses:
4505 #If the needed licenses are already unmasked, remove the mask.
4506 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
4508 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
4509 #Package has already been unmasked.
# Refuse when the current autounmask level does not permit the kind of
# change each remaining mask requires.
4512 if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
4513 (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
4514 (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
4515 (missing_licenses and not autounmask_level.allow_license_changes):
4516 #We are not allowed to do the needed changes.
# Record each needed change both in _dynamic_config (for this run) and
# in _backtrack_infos (so backtracking can replay them).
4519 if masked_by_unstable_keywords:
4520 self._dynamic_config._needed_unstable_keywords.add(pkg)
4521 backtrack_infos = self._dynamic_config._backtrack_infos
4522 backtrack_infos.setdefault("config", {})
4523 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
4524 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
4526 if masked_by_p_mask:
4527 self._dynamic_config._needed_p_mask_changes.add(pkg)
4528 backtrack_infos = self._dynamic_config._backtrack_infos
4529 backtrack_infos.setdefault("config", {})
4530 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
4531 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
4533 if missing_licenses:
4534 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
4535 backtrack_infos = self._dynamic_config._backtrack_infos
4536 backtrack_infos.setdefault("config", {})
4537 backtrack_infos["config"].setdefault("needed_license_changes", set())
4538 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
4542 def _pkg_use_enabled(self, pkg, target_use=None):
# (The four lines below are the original docstring body; the enclosing
# triple-quote lines are elided from this numbered listing.)
4544 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
4545 If target_use is given, the need changes are computed to make the package useable.
4546 Example: target_use = { "foo": True, "bar": False }
4547 The flags target_use must be in the pkg's IUSE.
# NOTE(review): the guard condition preceding this early return (and
# several other elided lines throughout) are missing from this listing.
4550 return pkg.use.enabled
4551 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query mode: no target_use given, so return the effective USE —
# either the recorded change set or the package's own enabled USE.
4553 if target_use is None:
4554 if needed_use_config_change is None:
4555 return pkg.use.enabled
4557 return needed_use_config_change[0]
# Change-computation mode: start from any previously recorded change
# so successive adjustments accumulate.
4559 if needed_use_config_change is not None:
4560 old_use = needed_use_config_change[0]
4562 old_changes = needed_use_config_change[1]
4563 new_changes = old_changes.copy()
4565 old_use = pkg.use.enabled
# Translate each requested flag through IUSE (handles USE_EXPAND /
# aliases) and record the enable/disable deltas.
4570 for flag, state in target_use.items():
4571 real_flag = pkg.iuse.get_real_flag(flag)
4572 if real_flag is None:
4573 # Triggered by use-dep defaults.
4576 if real_flag not in old_use:
# A conflicting earlier decision (wanted disabled, now needs enabled)
# is detected here; the handling lines are elided from this listing.
4577 if new_changes.get(real_flag) == False:
4579 new_changes[real_flag] = True
4582 if real_flag in old_use:
4583 if new_changes.get(real_flag) == True:
4585 new_changes[real_flag] = False
# Flags not mentioned in target_use keep their old state.
4586 new_use.update(old_use.difference(target_use))
4588 def want_restart_for_use_change(pkg, new_use):
# Local helper: decide whether this USE change alters the package's
# dependency strings (or a parent's USE dep) enough that dependency
# resolution must restart.
4589 if pkg not in self._dynamic_config.digraph.nodes:
4592 for key in Package._dep_keys + ("LICENSE",):
4593 dep = pkg._metadata[key]
4594 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4595 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4597 if old_val != new_val:
4600 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
4601 if not parent_atoms:
4604 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
4605 for ppkg, atom in parent_atoms:
# Only a parent atom whose USE deps mention a changed flag matters.
4606 if not atom.use or \
4607 not any(x in atom.use.required for x in changes):
4614 if new_changes != old_changes:
4615 #Don't do the change if it violates REQUIRED_USE.
4616 required_use = pkg._metadata.get("REQUIRED_USE")
4617 if required_use and check_required_use(required_use, old_use,
4618 pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
4619 not check_required_use(required_use, new_use,
4620 pkg.iuse.is_valid_flag, eapi=pkg.eapi):
# Masked or forced flags cannot be changed by the user, so reject any
# change set touching them.
4623 if any(x in pkg.use.mask for x in new_changes) or \
4624 any(x in pkg.use.force for x in new_changes):
# Record the accepted change for this run and for backtracking replay.
4627 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
4628 backtrack_infos = self._dynamic_config._backtrack_infos
4629 backtrack_infos.setdefault("config", {})
4630 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
4631 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
4632 if want_restart_for_use_change(pkg, new_use):
4633 self._dynamic_config._need_restart = True
4636 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
4637 root_config = self._frozen_config.roots[root]
4638 pkgsettings = self._frozen_config.pkgsettings[root]
4639 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
4640 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
4641 # List of acceptable packages, ordered by type preference.
4642 matched_packages = []
4643 matched_pkgs_ignore_use = []
4644 highest_version = None
4645 if not isinstance(atom, portage.dep.Atom):
4646 atom = portage.dep.Atom(atom)
4648 have_new_virt = atom_cp.startswith("virtual/") and \
4649 self._have_new_virt(root, atom_cp)
4650 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
4651 existing_node = None
4653 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
4654 usepkg = "--usepkg" in self._frozen_config.myopts
4655 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
4656 empty = "empty" in self._dynamic_config.myparams
4657 selective = "selective" in self._dynamic_config.myparams
4659 avoid_update = "--update" not in self._frozen_config.myopts
4660 dont_miss_updates = "--update" in self._frozen_config.myopts
4661 use_ebuild_visibility = self._frozen_config.myopts.get(
4662 '--use-ebuild-visibility', 'n') != 'n'
4663 reinstall_atoms = self._frozen_config.reinstall_atoms
4664 usepkg_exclude = self._frozen_config.usepkg_exclude
4665 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
4667 # Behavior of the "selective" parameter depends on
4668 # whether or not a package matches an argument atom.
4669 # If an installed package provides an old-style
4670 # virtual that is no longer provided by an available
4671 # package, the installed package may match an argument
4672 # atom even though none of the available packages do.
4673 # Therefore, "selective" logic does not consider
4674 # whether or not an installed package matches an
4675 # argument atom. It only considers whether or not
4676 # available packages match argument atoms, which is
4677 # represented by the found_available_arg flag.
4678 found_available_arg = False
4679 packages_with_invalid_use_config = []
4680 for find_existing_node in True, False:
4683 for db, pkg_type, built, installed, db_keys in dbs:
4686 if installed and not find_existing_node:
4687 want_reinstall = reinstall or empty or \
4688 (found_available_arg and not selective)
4689 if want_reinstall and matched_packages:
4692 # Ignore USE deps for the initial match since we want to
4693 # ensure that updates aren't missed solely due to the user's
4694 # USE configuration.
4695 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
4697 if pkg.cp != atom_cp and have_new_virt:
4698 # pull in a new-style virtual instead
4700 if pkg in self._dynamic_config._runtime_pkg_mask:
4701 # The package has been masked by the backtracking logic
4703 root_slot = (pkg.root, pkg.slot_atom)
4704 if pkg.built and root_slot in self._rebuild.rebuild_list:
4706 if (pkg.installed and
4707 root_slot in self._rebuild.reinstall_list):
4710 if not pkg.installed and \
4711 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
4712 modified_use=self._pkg_use_enabled(pkg)):
4715 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
4716 modified_use=self._pkg_use_enabled(pkg)):
4719 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
4720 modified_use=self._pkg_use_enabled(pkg))
4722 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
4723 (not pkg.installed or dont_miss_updates):
4724 # Check if a higher version was rejected due to user
4725 # USE configuration. The packages_with_invalid_use_config
4726 # list only contains unbuilt ebuilds since USE can't
4727 # be changed for built packages.
4728 higher_version_rejected = False
4729 repo_priority = pkg.repo_priority
4730 for rejected in packages_with_invalid_use_config:
4731 if rejected.cp != pkg.cp:
4734 higher_version_rejected = True
4736 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
4737 # If version is identical then compare
4738 # repo priority (see bug #350254).
4739 rej_repo_priority = rejected.repo_priority
4740 if rej_repo_priority is not None and \
4741 (repo_priority is None or
4742 rej_repo_priority > repo_priority):
4743 higher_version_rejected = True
4745 if higher_version_rejected:
4749 reinstall_for_flags = None
4751 if not pkg.installed or \
4752 (matched_packages and not avoid_update):
4753 # Only enforce visibility on installed packages
4754 # if there is at least one other visible package
4755 # available. By filtering installed masked packages
4756 # here, packages that have been masked since they
4757 # were installed can be automatically downgraded
4758 # to an unmasked version. NOTE: This code needs to
4759 # be consistent with masking behavior inside
4760 # _dep_check_composite_db, in order to prevent
4761 # incorrect choices in || deps like bug #351828.
4763 if not self._pkg_visibility_check(pkg, autounmask_level):
4766 # Enable upgrade or downgrade to a version
4767 # with visible KEYWORDS when the installed
4768 # version is masked by KEYWORDS, but never
4769 # reinstall the same exact version only due
4770 # to a KEYWORDS mask. See bug #252167.
4772 if pkg.type_name != "ebuild" and matched_packages:
4773 # Don't re-install a binary package that is
4774 # identical to the currently installed package
4775 # (see bug #354441).
4776 identical_binary = False
4777 if usepkg and pkg.installed:
4778 for selected_pkg in matched_packages:
4779 if selected_pkg.type_name == "binary" and \
4780 selected_pkg.cpv == pkg.cpv and \
4781 selected_pkg.build_time == \
4783 identical_binary = True
4786 if not identical_binary:
4787 # If the ebuild no longer exists or it's
4788 # keywords have been dropped, reject built
4789 # instances (installed or binary).
4790 # If --usepkgonly is enabled, assume that
4791 # the ebuild status should be ignored.
4792 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
4793 if pkg.installed and pkg.masks:
4795 elif not self._equiv_ebuild_visible(pkg,
4796 autounmask_level=autounmask_level):
4799 # Calculation of USE for unbuilt ebuilds is relatively
4800 # expensive, so it is only performed lazily, after the
4801 # above visibility checks are complete.
4805 for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
4806 if myarg.force_reinstall:
4809 except InvalidDependString:
4811 # masked by corruption
4813 if not installed and myarg:
4814 found_available_arg = True
4816 if atom.unevaluated_atom.use:
4817 #Make sure we don't miss a 'missing IUSE'.
4818 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
4819 # Don't add this to packages_with_invalid_use_config
4820 # since IUSE cannot be adjusted by the user.
4825 matched_pkgs_ignore_use.append(pkg)
4826 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
4828 for flag in atom.use.enabled:
4829 target_use[flag] = True
4830 for flag in atom.use.disabled:
4831 target_use[flag] = False
4832 use = self._pkg_use_enabled(pkg, target_use)
4834 use = self._pkg_use_enabled(pkg)
4837 can_adjust_use = not pkg.built
4838 is_valid_flag = pkg.iuse.is_valid_flag
4839 missing_enabled = frozenset(x for x in
4840 atom.use.missing_enabled if not is_valid_flag(x))
4841 missing_disabled = frozenset(x for x in
4842 atom.use.missing_disabled if not is_valid_flag(x))
4844 if atom.use.enabled:
4845 if any(x in atom.use.enabled for x in missing_disabled):
4847 can_adjust_use = False
4848 need_enabled = atom.use.enabled.difference(use)
4850 need_enabled = need_enabled.difference(missing_enabled)
4854 if any(x in pkg.use.mask for x in need_enabled):
4855 can_adjust_use = False
4857 if atom.use.disabled:
4858 if any(x in atom.use.disabled for x in missing_enabled):
4860 can_adjust_use = False
4861 need_disabled = atom.use.disabled.intersection(use)
4863 need_disabled = need_disabled.difference(missing_disabled)
4867 if any(x in pkg.use.force and x not in
4868 pkg.use.mask for x in need_disabled):
4869 can_adjust_use = False
4873 # Above we must ensure that this package has
4874 # absolutely no use.force, use.mask, or IUSE
4875 # issues that the user typically can't make
4876 # adjustments to solve (see bug #345979).
4877 # FIXME: Conditional USE deps complicate
4878 # issues. This code currently excludes cases
4879 # in which the user can adjust the parent
4880 # package's USE in order to satisfy the dep.
4881 packages_with_invalid_use_config.append(pkg)
4884 if pkg.cp == atom_cp:
4885 if highest_version is None:
4886 highest_version = pkg
4887 elif pkg > highest_version:
4888 highest_version = pkg
4889 # At this point, we've found the highest visible
4890 # match from the current repo. Any lower versions
4891 # from this repo are ignored, so this so the loop
4892 # will always end with a break statement below
4894 if find_existing_node:
4895 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4899 # Use PackageSet.findAtomForPackage()
4900 # for PROVIDE support.
4901 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
4902 if highest_version and \
4903 e_pkg.cp == atom_cp and \
4904 e_pkg < highest_version and \
4905 e_pkg.slot_atom != highest_version.slot_atom:
4906 # There is a higher version available in a
4907 # different slot, so this existing node is
4911 matched_packages.append(e_pkg)
4912 existing_node = e_pkg
4914 # Compare built package to current config and
4915 # reject the built package if necessary.
4916 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4917 ("--newuse" in self._frozen_config.myopts or \
4918 "--reinstall" in self._frozen_config.myopts or \
4919 (not installed and self._dynamic_config.myparams.get(
4920 "binpkg_respect_use") in ("y", "auto"))):
4921 iuses = pkg.iuse.all
4922 old_use = self._pkg_use_enabled(pkg)
4924 pkgsettings.setcpv(myeb)
4926 pkgsettings.setcpv(pkg)
4927 now_use = pkgsettings["PORTAGE_USE"].split()
4928 forced_flags = set()
4929 forced_flags.update(pkgsettings.useforce)
4930 forced_flags.update(pkgsettings.usemask)
4932 if myeb and not usepkgonly and not useoldpkg:
4933 cur_iuse = myeb.iuse.all
4934 reinstall_for_flags = self._reinstall_for_flags(pkg,
4935 forced_flags, old_use, iuses, now_use, cur_iuse)
4936 if reinstall_for_flags:
4937 if not pkg.installed:
4938 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4940 # Compare current config to installed package
4941 # and do not reinstall if possible.
4942 if not installed and not useoldpkg and \
4943 ("--newuse" in self._frozen_config.myopts or \
4944 "--reinstall" in self._frozen_config.myopts) and \
4945 cpv in vardb.match(atom):
4946 forced_flags = set()
4947 forced_flags.update(pkg.use.force)
4948 forced_flags.update(pkg.use.mask)
4949 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4950 old_use = inst_pkg.use.enabled
4951 old_iuse = inst_pkg.iuse.all
4952 cur_use = self._pkg_use_enabled(pkg)
4953 cur_iuse = pkg.iuse.all
4954 reinstall_for_flags = \
4955 self._reinstall_for_flags(pkg,
4956 forced_flags, old_use, old_iuse,
4958 if reinstall_for_flags:
4960 if reinstall_atoms.findAtomForPackage(pkg, \
4961 modified_use=self._pkg_use_enabled(pkg)):
4966 matched_oldpkg.append(pkg)
4967 matched_packages.append(pkg)
4968 if reinstall_for_flags:
4969 self._dynamic_config._reinstall_nodes[pkg] = \
4973 if not matched_packages:
4976 if "--debug" in self._frozen_config.myopts:
4977 for pkg in matched_packages:
4978 portage.writemsg("%s %s%s%s\n" % \
4979 ((pkg.type_name + ":").rjust(10),
4980 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4982 # Filter out any old-style virtual matches if they are
4983 # mixed with new-style virtual matches.
4985 if len(matched_packages) > 1 and \
4986 "virtual" == portage.catsplit(cp)[0]:
4987 for pkg in matched_packages:
4990 # Got a new-style virtual, so filter
4991 # out any old-style virtuals.
4992 matched_packages = [pkg for pkg in matched_packages \
4996 if existing_node is not None and \
4997 existing_node in matched_packages:
4998 return existing_node, existing_node
5000 if len(matched_packages) > 1:
5001 if rebuilt_binaries:
5005 for pkg in matched_packages:
5011 if unbuilt_pkg is None or pkg > unbuilt_pkg:
5013 if built_pkg is not None and inst_pkg is not None:
5014 # Only reinstall if binary package BUILD_TIME is
5015 # non-empty, in order to avoid cases like to
5016 # bug #306659 where BUILD_TIME fields are missing
5017 # in local and/or remote Packages file.
5018 built_timestamp = built_pkg.build_time
5019 installed_timestamp = inst_pkg.build_time
5021 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
5023 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
5024 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
5025 if built_timestamp and \
5026 built_timestamp > installed_timestamp and \
5027 built_timestamp >= minimal_timestamp:
5028 return built_pkg, existing_node
5030 #Don't care if the binary has an older BUILD_TIME than the installed
5031 #package. This is for closely tracking a binhost.
5032 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
5034 if built_timestamp and \
5035 built_timestamp != installed_timestamp:
5036 return built_pkg, existing_node
5038 for pkg in matched_packages:
5039 if pkg.installed and pkg.invalid:
5040 matched_packages = [x for x in \
5041 matched_packages if x is not pkg]
5044 for pkg in matched_packages:
5045 if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
5046 return pkg, existing_node
5048 visible_matches = []
5050 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
5051 if self._pkg_visibility_check(pkg, autounmask_level)]
5052 if not visible_matches:
5053 visible_matches = [pkg.cpv for pkg in matched_packages \
5054 if self._pkg_visibility_check(pkg, autounmask_level)]
5056 bestmatch = portage.best(visible_matches)
5058 # all are masked, so ignore visibility
5059 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
5060 matched_packages = [pkg for pkg in matched_packages \
5061 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5063 # ordered by type preference ("ebuild" type is the last resort)
5064 return matched_packages[-1], existing_node
5066 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Package selector installed by _complete_graph() (non-"remove" case):
# it satisfies atoms only from packages already present in the graph
# trees, so graph completion cannot pull in brand-new packages.
# NOTE(review): several lines are absent from this dump (the docstring
# delimiters and, presumably, an empty-`matches` guard) — verify
# against the original file before editing.
5068 Select packages that have already been added to the graph or
5069 those that are installed and have not been scheduled for
5072 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
5073 matches = graph_db.match_pkgs(atom)
5076 pkg = matches[-1] # highest match
# Second element reports any graph package already occupying this slot.
5077 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
5078 return pkg, in_graph
5080 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
# Package selector installed by _complete_graph() for removal actions
# ("remove" in myparams): atoms are satisfied only from the installed
# package database (vartree).
# NOTE(review): lines are missing from this dump (docstring delimiters
# and some branch bodies) — confirm against the original file.
5082 Select packages that are installed.
5084 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
# When several installed versions match, progressively narrow the
# candidate list using visibility / mask heuristics before choosing.
5088 if len(matches) > 1:
5089 matches.reverse() # ascending order
5090 unmasked = [pkg for pkg in matches if \
5091 self._pkg_visibility_check(pkg)]
5093 if len(unmasked) == 1:
5096 # Account for packages with masks (like KEYWORDS masks)
5097 # that are usually ignored in visibility checks for
5098 # installed packages, in order to handle cases like
5100 unmasked = [pkg for pkg in matches if not pkg.masks]
5103 if len(matches) > 1:
5104 # Now account for packages for which existing
5105 # ebuilds are masked or unavailable (bug #445506).
5106 unmasked = [pkg for pkg in matches if
5107 self._equiv_ebuild_visible(pkg)]
5111 pkg = matches[-1] # highest match
# Second element reports any graph package already occupying this slot.
5112 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
5113 return pkg, in_graph
5115 def _complete_graph(self, required_sets=None):
# NOTE(review): this dump is missing lines throughout (docstring
# delimiters, `return`/`continue` statements, some assignments such as
# the one feeding `use_change`) — treat the flow below as indicative
# and verify against the original file before editing.
5117 Add any deep dependencies of required sets (args, system, world) that
5118 have not been pulled into the graph yet. This ensures that the graph
5119 is consistent such that initially satisfied deep dependencies are not
5120 broken in the new graph. Initially unsatisfied dependencies are
5121 irrelevant since we only want to avoid breaking dependencies that are
5122 initially satisfied.
5124 Since this method can consume enough time to disturb users, it is
5125 currently only enabled by the --complete-graph option.
5127 @param required_sets: contains required sets (currently only used
5128 for depclean and prune removal operations)
5129 @type required_sets: dict
# Graph completion is pointless for --buildpkgonly and when recursion
# is disabled.
5131 if "--buildpkgonly" in self._frozen_config.myopts or \
5132 "recurse" not in self._dynamic_config.myparams:
# The complete_if_new_* knobs decide whether a pending change to an
# installed package should force "complete" mode on automatically.
5135 complete_if_new_use = self._dynamic_config.myparams.get(
5136 "complete_if_new_use", "y") == "y"
5137 complete_if_new_ver = self._dynamic_config.myparams.get(
5138 "complete_if_new_ver", "y") == "y"
5139 rebuild_if_new_slot = self._dynamic_config.myparams.get(
5140 "rebuild_if_new_slot", "y") == "y"
5141 complete_if_new_slot = rebuild_if_new_slot
5143 if "complete" not in self._dynamic_config.myparams and \
5144 (complete_if_new_use or
5145 complete_if_new_ver or complete_if_new_slot):
5146 # Enable complete mode if an installed package will change somehow.
5148 version_change = False
# Scan every "merge" node and compare it against what is currently
# installed in the same slot.
5149 for node in self._dynamic_config.digraph:
5150 if not isinstance(node, Package) or \
5151 node.operation != "merge":
5153 vardb = self._frozen_config.roots[
5154 node.root].trees["vartree"].dbapi
5156 if complete_if_new_use or complete_if_new_ver:
5157 inst_pkg = vardb.match_pkgs(node.slot_atom)
5158 if inst_pkg and inst_pkg[0].cp == node.cp:
5159 inst_pkg = inst_pkg[0]
5160 if complete_if_new_ver:
# `<` in both directions detects any version difference
# (upgrade or downgrade).
5161 if inst_pkg < node or node < inst_pkg:
5162 version_change = True
5164 elif not (inst_pkg.slot == node.slot and
5165 inst_pkg.sub_slot == node.sub_slot):
5166 # slot/sub-slot change without revbump gets
5167 # similar treatment to a version change
5168 version_change = True
5171 # Intersect enabled USE with IUSE, in order to
5172 # ignore forced USE from implicit IUSE flags, since
5173 # they're probably irrelevant and they are sensitive
5174 # to use.mask/force changes in the profile.
5175 if complete_if_new_use and \
5176 (node.iuse.all != inst_pkg.iuse.all or
5177 self._pkg_use_enabled(node).intersection(node.iuse.all) !=
5178 self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
5182 if complete_if_new_slot:
# A merge node whose slot/sub-slot pair does not exist among
# installed instances of the same cp means a new slot is
# being introduced.
5183 cp_list = vardb.match_pkgs(Atom(node.cp))
5184 if (cp_list and cp_list[0].cp == node.cp and
5185 not any(node.slot == pkg.slot and
5186 node.sub_slot == pkg.sub_slot for pkg in cp_list)):
5187 version_change = True
5190 if use_change or version_change:
5191 self._dynamic_config.myparams["complete"] = True
5193 if "complete" not in self._dynamic_config.myparams:
5198 # Put the depgraph into a mode that causes it to only
5199 # select packages that have already been added to the
5200 # graph or those that are installed and have not been
5201 # scheduled for replacement. Also, toggle the "deep"
5202 # parameter so that all dependencies are traversed and
5204 self._dynamic_config._complete_mode = True
5205 self._select_atoms = self._select_atoms_from_graph
5206 if "remove" in self._dynamic_config.myparams:
5207 self._select_package = self._select_pkg_from_installed
5209 self._select_package = self._select_pkg_from_graph
5210 self._dynamic_config._traverse_ignored_deps = True
5211 already_deep = self._dynamic_config.myparams.get("deep") is True
5212 if not already_deep:
5213 self._dynamic_config.myparams["deep"] = True
5215 # Invalidate the package selection cache, since
5216 # _select_package has just changed implementations.
5217 for trees in self._dynamic_config._filtered_trees.values():
5218 trees["porttree"].dbapi._clear_cache()
# Rebuild the argument list, appending the required sets (system,
# world, ...) for each relevant root so their deep deps get traversed.
5220 args = self._dynamic_config._initial_arg_list[:]
5221 for root in self._frozen_config.roots:
5222 if root != self._frozen_config.target_root and \
5223 ("remove" in self._dynamic_config.myparams or
5224 self._frozen_config.myopts.get("--root-deps") is not None):
5225 # Only pull in deps for the relevant root.
5227 depgraph_sets = self._dynamic_config.sets[root]
5228 required_set_names = self._frozen_config._required_set_names.copy()
5229 remaining_args = required_set_names.copy()
5230 if required_sets is None or root not in required_sets:
5233 # Removal actions may override sets with temporary
5234 # replacements that have had atoms removed in order
5235 # to implement --deselect behavior.
5236 required_set_names = set(required_sets[root])
5237 depgraph_sets.sets.clear()
5238 depgraph_sets.sets.update(required_sets[root])
5239 if "remove" not in self._dynamic_config.myparams and \
5240 root == self._frozen_config.target_root and \
5242 remaining_args.difference_update(depgraph_sets.sets)
5243 if not remaining_args and \
5244 not self._dynamic_config._ignored_deps and \
5245 not self._dynamic_config._dep_stack:
5247 root_config = self._frozen_config.roots[root]
5248 for s in required_set_names:
5249 pset = depgraph_sets.sets.get(s)
5251 pset = root_config.sets[s]
5252 atom = SETPREFIX + s
5253 args.append(SetArg(arg=atom, pset=pset,
5254 root_config=root_config))
5256 self._set_args(args)
# Push every atom of every (expanded) set arg onto the dep stack so
# _create_graph() traverses them.
5257 for arg in self._expand_set_args(args, add_to_digraph=True):
5258 for atom in arg.pset.getAtoms():
5259 self._dynamic_config._dep_stack.append(
5260 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps must now be traversed too.
5264 if self._dynamic_config._ignored_deps:
5265 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
5266 self._dynamic_config._ignored_deps = []
5267 if not self._create_graph(allow_unsatisfied=True):
5269 # Check the unsatisfied deps to see if any initially satisfied deps
5270 # will become unsatisfied due to an upgrade. Initially unsatisfied
5271 # deps are irrelevant since we only want to avoid breaking deps
5272 # that are initially satisfied.
5273 while self._dynamic_config._unsatisfied_deps:
5274 dep = self._dynamic_config._unsatisfied_deps.pop()
5275 vardb = self._frozen_config.roots[
5276 dep.root].trees["vartree"].dbapi
5277 matches = vardb.match_pkgs(dep.atom)
5279 self._dynamic_config._initially_unsatisfied_deps.append(dep)
5281 # An scheduled installation broke a deep dependency.
5282 # Add the installed package to the graph so that it
5283 # will be appropriately reported as a slot collision
5284 # (possibly solvable via backtracking).
5285 pkg = matches[-1] # highest match
5286 if not self._add_pkg(pkg, dep):
5288 if not self._create_graph(allow_unsatisfied=True):
5292 def _pkg(self, cpv, type_name, root_config, installed=False,
5293 onlydeps=False, myrepo = None):
# Cache-first Package factory: look up the frozen-config package cache
# (and, for onlydeps requests, the dynamic graph dbapi), and only build
# a new Package from dbapi metadata when no cached instance exists.
# NOTE(review): some lines (docstring delimiters, the `if pkg is None:`
# guard and the try/except around aux_get implied by the comment at
# 5296) are missing from this dump — verify against the original file.
5295 Get a package instance from the cache, or create a new
5296 one if necessary. Raises PackageNotFound from aux_get if it
5297 failures for some reason (package does not exist or is
5301 # Ensure that we use the specially optimized RootConfig instance
5302 # that refers to FakeVartree instead of the real vartree.
5303 root_config = self._frozen_config.roots[root_config.root]
5304 pkg = self._frozen_config._pkg_cache.get(
5305 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5306 repo_name=myrepo, root_config=root_config,
5307 installed=installed, onlydeps=onlydeps))
5308 if pkg is None and onlydeps and not installed:
5309 # Maybe it already got pulled in as a "merge" node.
5310 pkg = self._dynamic_config.mydbapi[root_config.root].get(
5311 Package._gen_hash_key(cpv=cpv, type_name=type_name,
5312 repo_name=myrepo, root_config=root_config,
5313 installed=installed, onlydeps=False))
# Cache miss: fetch metadata keys from the original (non-fake) tree's
# dbapi and construct a fresh Package instance.
5316 tree_type = self.pkg_tree_map[type_name]
5317 db = root_config.trees[tree_type].dbapi
5318 db_keys = list(self._frozen_config._trees_orig[root_config.root][
5319 tree_type].dbapi._aux_cache_keys)
5322 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
5324 raise portage.exception.PackageNotFound(cpv)
# Anything not built from an ebuild (binary/installed) is "built".
5326 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
5327 installed=installed, metadata=metadata, onlydeps=onlydeps,
5328 root_config=root_config, type_name=type_name)
5330 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked solely by LICENSE,
# so the user can later be told to adjust ACCEPT_LICENSE.
5332 if not self._pkg_visibility_check(pkg) and \
5333 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
5334 slot_key = (pkg.root, pkg.slot_atom)
5335 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
5336 if other_pkg is None or pkg > other_pkg:
5337 self._frozen_config._highest_license_masked[slot_key] = pkg
5341 def _validate_blockers(self):
5342 """Remove any blockers from the digraph that do not match any of the
5343 packages within the graph. If necessary, create hard deps to ensure
5344 correct merge order such that mutually blocking packages are never
5345 installed simultaneously. Also add runtime blockers from all installed
5346 packages if any of them haven't been added already (bug 128809)."""
# NOTE(review): many lines are absent from this dump (loop headers,
# `continue`/`return` statements, the try/except around dep_check) —
# verify structure against the original file before editing.
5348 if "--buildpkgonly" in self._frozen_config.myopts or \
5349 "--nodeps" in self._frozen_config.myopts:
5353 # Pull in blockers from all installed packages that haven't already
5354 # been pulled into the depgraph, in order to ensure that they are
5355 # respected (bug 128809). Due to the performance penalty that is
5356 # incurred by all the additional dep_check calls that are required,
5357 # blockers returned from dep_check are cached on disk by the
5358 # BlockerCache class.
5360 # For installed packages, always ignore blockers from DEPEND since
5361 # only runtime dependencies should be relevant for packages that
5362 # are already built.
5363 dep_keys = Package._runtime_keys
# Phase 1: for every root, collect runtime blocker atoms of every
# installed package (using the on-disk BlockerCache when valid) and
# register them in _blocker_parents.
5364 for myroot in self._frozen_config.trees:
5366 if self._frozen_config.myopts.get("--root-deps") is not None and \
5367 myroot != self._frozen_config.target_root:
5370 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
5371 pkgsettings = self._frozen_config.pkgsettings[myroot]
5372 root_config = self._frozen_config.roots[myroot]
5373 final_db = self._dynamic_config.mydbapi[myroot]
5375 blocker_cache = BlockerCache(myroot, vardb)
# Any cache entry not touched below is stale and purged afterwards.
5376 stale_cache = set(blocker_cache)
5379 stale_cache.discard(cpv)
5380 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
5382 pkg in self._dynamic_config._traversed_pkg_deps
5384 # Check for masked installed packages. Only warn about
5385 # packages that are in the graph in order to avoid warning
5386 # about those that will be automatically uninstalled during
5387 # the merge process or by --depclean. Always warn about
5388 # packages masked by license, since the user likely wants
5389 # to adjust ACCEPT_LICENSE.
5391 if not self._pkg_visibility_check(pkg,
5392 trust_graph=False) and \
5393 (pkg_in_graph or 'LICENSE' in pkg.masks):
5394 self._dynamic_config._masked_installed.add(pkg)
5396 self._check_masks(pkg)
5398 blocker_atoms = None
# Blockers already discovered during graph traversal, minus those
# previously classified as irrelevant.
5404 self._dynamic_config._blocker_parents.child_nodes(pkg))
5409 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
5413 # Select just the runtime blockers.
5414 blockers = [blocker for blocker in blockers \
5415 if blocker.priority.runtime or \
5416 blocker.priority.runtime_post]
5417 if blockers is not None:
5418 blockers = set(blocker.atom for blocker in blockers)
5420 # If this node has any blockers, create a "nomerge"
5421 # node for it so that they can be enforced.
5422 self._spinner_update()
5423 blocker_data = blocker_cache.get(cpv)
# A counter mismatch means the cached entry belongs to a
# different installed instance of this cpv.
5424 if blocker_data is not None and \
5425 blocker_data.counter != pkg.counter:
5428 # If blocker data from the graph is available, use
5429 # it to validate the cache and update the cache if
5431 if blocker_data is not None and \
5432 blockers is not None:
5433 if not blockers.symmetric_difference(
5434 blocker_data.atoms):
5438 if blocker_data is None and \
5439 blockers is not None:
5440 # Re-use the blockers from the graph.
5441 blocker_atoms = sorted(blockers)
5443 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
5444 blocker_cache[pkg.cpv] = blocker_data
5448 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
# Cache miss: run a real dep_check to extract blockers.
5450 # Use aux_get() to trigger FakeVartree global
5451 # updates on *DEPEND when appropriate.
5452 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5453 # It is crucial to pass in final_db here in order to
5454 # optimize dep_check calls by eliminating atoms via
5455 # dep_wordreduce and dep_eval calls.
5457 success, atoms = portage.dep_check(depstr,
5458 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
5459 trees=self._dynamic_config._graph_trees, myroot=myroot)
5462 except Exception as e:
5463 # This is helpful, for example, if a ValueError
5464 # is thrown from cpv_expand due to multiple
5465 # matches (this can happen if an atom lacks a
5467 show_invalid_depstring_notice(
5468 pkg, depstr, "%s" % (e,))
# dep_check failure path: tolerate invalid deps when this
# package is about to be replaced anyway.
5472 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5473 if replacement_pkg and \
5474 replacement_pkg[0].operation == "merge":
5475 # This package is being replaced anyway, so
5476 # ignore invalid dependencies so as not to
5477 # annoy the user too much (otherwise they'd be
5478 # forced to manually unmerge it first).
5480 show_invalid_depstring_notice(pkg, depstr, atoms)
5482 blocker_atoms = [myatom for myatom in atoms \
5484 blocker_atoms.sort()
5485 blocker_cache[cpv] = \
5486 blocker_cache.BlockerData(pkg.counter, blocker_atoms)
5489 for atom in blocker_atoms:
5490 blocker = Blocker(atom=atom,
5492 priority=self._priority(runtime=True),
5494 self._dynamic_config._blocker_parents.add(blocker, pkg)
5495 except portage.exception.InvalidAtom as e:
5496 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5497 show_invalid_depstring_notice(
5498 pkg, depstr, "Invalid Atom: %s" % (e,))
5500 for cpv in stale_cache:
5501 del blocker_cache[cpv]
5502 blocker_cache.flush()
5505 # Discard any "uninstall" tasks scheduled by previous calls
5506 # to this method, since those tasks may not make sense given
5507 # the current graph state.
5508 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
5509 if previous_uninstall_tasks:
5510 self._dynamic_config._blocker_uninstalls = digraph()
5511 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each registered blocker against the initial
# (installed) and final (post-merge) package databases.
5513 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
5514 self._spinner_update()
5515 root_config = self._frozen_config.roots[blocker.root]
5516 virtuals = root_config.settings.getvirtuals()
5517 myroot = blocker.root
5518 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
5519 final_db = self._dynamic_config.mydbapi[myroot]
5521 provider_virtual = False
5522 if blocker.cp in virtuals and \
5523 not self._have_new_virt(blocker.root, blocker.cp):
5524 provider_virtual = True
5526 # Use this to check PROVIDE for each matched package
5528 atom_set = InternalPackageSet(
5529 initial_atoms=[blocker.atom])
# Old-style virtual blockers expand to one atom per provider.
5531 if provider_virtual:
5533 for provider_entry in virtuals[blocker.cp]:
5534 atoms.append(Atom(blocker.atom.replace(
5535 blocker.cp, provider_entry.cp, 1)))
5537 atoms = [blocker.atom]
5539 blocked_initial = set()
5541 for pkg in initial_db.match_pkgs(atom):
5542 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5543 blocked_initial.add(pkg)
5545 blocked_final = set()
5547 for pkg in final_db.match_pkgs(atom):
5548 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5549 blocked_final.add(pkg)
# Nothing matches at all: the blocker is irrelevant; drop it.
5551 if not blocked_initial and not blocked_final:
5552 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
5553 self._dynamic_config._blocker_parents.remove(blocker)
5554 # Discard any parents that don't have any more blockers.
5555 for pkg in parent_pkgs:
5556 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
5557 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
5558 self._dynamic_config._blocker_parents.remove(pkg)
# Otherwise, decide per parent whether the block is resolvable by
# ordering (uninstall-before-merge) or truly unsolvable.
5560 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
5561 unresolved_blocks = False
5562 depends_on_order = set()
5563 for pkg in blocked_initial:
5564 if pkg.slot_atom == parent.slot_atom and \
5565 not blocker.atom.blocker.overlap.forbid:
5566 # New !!atom blockers do not allow temporary
5567 # simulaneous installation, so unlike !atom
5568 # blockers, !!atom blockers aren't ignored
5569 # when they match other packages occupying
5572 if parent.installed:
5573 # Two currently installed packages conflict with
5574 # eachother. Ignore this case since the damage
5575 # is already done and this would be likely to
5576 # confuse users if displayed like a normal blocker.
5579 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5581 if parent.operation == "merge":
5582 # Maybe the blocked package can be replaced or simply
5583 # unmerged to resolve this block.
5584 depends_on_order.add((pkg, parent))
5586 # None of the above blocker resolutions techniques apply,
5587 # so apparently this one is unresolvable.
5588 unresolved_blocks = True
5589 for pkg in blocked_final:
5590 if pkg.slot_atom == parent.slot_atom and \
5591 not blocker.atom.blocker.overlap.forbid:
5598 if parent.operation == "nomerge" and \
5599 pkg.operation == "nomerge":
5600 # This blocker will be handled the next time that a
5601 # merge of either package is triggered.
5604 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5606 # Maybe the blocking package can be
5607 # unmerged to resolve this block.
5608 if parent.operation == "merge" and pkg.installed:
5609 depends_on_order.add((pkg, parent))
5611 elif parent.operation == "nomerge":
5612 depends_on_order.add((parent, pkg))
5614 # None of the above blocker resolutions techniques apply,
5615 # so apparently this one is unresolvable.
5616 unresolved_blocks = True
5618 # Make sure we don't unmerge any package that have been pulled
5620 if not unresolved_blocks and depends_on_order:
5621 for inst_pkg, inst_task in depends_on_order:
5622 if self._dynamic_config.digraph.contains(inst_pkg) and \
5623 self._dynamic_config.digraph.parent_nodes(inst_pkg):
5624 unresolved_blocks = True
5627 if not unresolved_blocks and depends_on_order:
5628 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit
# "uninstall" task node.
5629 uninst_task = Package(built=inst_pkg.built,
5630 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5631 metadata=inst_pkg._metadata,
5632 operation="uninstall",
5633 root_config=inst_pkg.root_config,
5634 type_name=inst_pkg.type_name)
5635 # Enforce correct merge order with a hard dep.
5636 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
5637 priority=BlockerDepPriority.instance)
5638 # Count references to this blocker so that it can be
5639 # invalidated after nodes referencing it have been
5641 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
5642 if not unresolved_blocks and not depends_on_order:
5643 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
5644 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
5645 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
5646 self._dynamic_config._blocker_parents.remove(blocker)
5647 if not self._dynamic_config._blocker_parents.child_nodes(parent):
5648 self._dynamic_config._blocker_parents.remove(parent)
5649 if unresolved_blocks:
5650 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
5654 def _accept_blocker_conflicts(self):
# True when any of these modes is active — they never actually merge
# anything, so blocker conflicts are acceptable.
# NOTE(review): the accumulator assignment and return lines are missing
# from this dump — confirm against the original file.
5656 for x in ("--buildpkgonly", "--fetchonly",
5657 "--fetch-all-uri", "--nodeps"):
5658 if x in self._frozen_config.myopts:
5663 def _merge_order_bias(self, mygraph):
5665 For optimal leaf node selection, promote deep system runtime deps and
5666 order nodes from highest to lowest overall reference count.
# NOTE(review): docstring delimiters, the node_info initialization and
# the comparator's return statements are missing from this dump —
# confirm against the original file.
# node_info maps each node to its parent (reference) count.
5670 for node in mygraph.order:
5671 node_info[node] = len(mygraph.parent_nodes(node))
5672 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
5674 def cmp_merge_preference(node1, node2):
# Uninstall tasks sort apart from merges; system deps are promoted;
# otherwise higher reference count sorts first.
5676 if node1.operation == 'uninstall':
5677 if node2.operation == 'uninstall':
5681 if node2.operation == 'uninstall':
5682 if node1.operation == 'uninstall':
5686 node1_sys = node1 in deep_system_deps
5687 node2_sys = node2 in deep_system_deps
5688 if node1_sys != node2_sys:
5693 return node_info[node2] - node_info[node1]
# Python 3 sort takes a key function, hence the cmp_sort_key adapter.
5695 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
5697 def altlist(self, reversed=False):
# Serialize the depgraph into the final ordered merge list, resolving
# conflicts and retrying serialization until it succeeds; the result
# is cached in _serialized_tasks_cache.
# NOTE(review): the try statement, cache-reset and return lines are
# missing from this dump — confirm against the original file.
5699 while self._dynamic_config._serialized_tasks_cache is None:
5700 self._resolve_conflicts()
5702 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
5703 self._serialize_tasks()
5704 except self._serialize_tasks_retry:
5707 retlist = self._dynamic_config._serialized_tasks_cache
5709 # TODO: deprecate the "reversed" parameter (builtin name collision)
5710 retlist = list(retlist)
5712 retlist = tuple(retlist)
5716 def _implicit_libc_deps(self, mergelist, graph):
5718 Create implicit dependencies on libc, in order to ensure that libc
5719 is installed as early as possible (see bug #303567).
# NOTE(review): docstring delimiters, the libc_pkgs initialization and
# several guard/continue lines are missing from this dump — confirm
# against the original file.
5722 implicit_libc_roots = (self._frozen_config._running_root.root,)
5723 for root in implicit_libc_roots:
5724 graphdb = self._dynamic_config.mydbapi[root]
5725 vardb = self._frozen_config.trees[root]["vartree"].dbapi
# Collect libc packages that are scheduled to be merged and are not
# yet installed.
5726 for atom in self._expand_virt_from_graph(root,
5727 portage.const.LIBC_PACKAGE_ATOM):
5730 match = graphdb.match_pkgs(atom)
5734 if pkg.operation == "merge" and \
5735 not vardb.cpv_exists(pkg.cpv):
5736 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; every merge that comes before a libc
# package on the same root gets a buildtime dep edge on that libc,
# pushing libc toward the front of the merge order.
5741 earlier_libc_pkgs = set()
5743 for pkg in mergelist:
5744 if not isinstance(pkg, Package):
5745 # a satisfied blocker
5747 root_libc_pkgs = libc_pkgs.get(pkg.root)
5748 if root_libc_pkgs is not None and \
5749 pkg.operation == "merge":
5750 if pkg in root_libc_pkgs:
5751 earlier_libc_pkgs.add(pkg)
5753 for libc_pkg in root_libc_pkgs:
5754 if libc_pkg in earlier_libc_pkgs:
5755 graph.add(libc_pkg, pkg,
5756 priority=DepPriority(buildtime=True))
5758 def schedulerGraph(self):
# NOTE(review): docstring delimiters, the loop over roots near 5797,
# break_refs()/return lines are missing from this dump — confirm
# against the original file.
5760 The scheduler graph is identical to the normal one except that
5761 uninstall edges are reversed in specific cases that require
5762 conflicting packages to be temporarily installed simultaneously.
5763 This is intended for use by the Scheduler in it's parallelization
5764 logic. It ensures that temporary simultaneous installation of
5765 conflicting packages is avoided when appropriate (especially for
5766 !!atom blockers), but allowed in specific cases that require it.
5768 Note that this method calls break_refs() which alters the state of
5769 internal Package instances such that this depgraph instance should
5770 not be used to perform any more calculations.
5773 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
5774 mergelist = self.altlist()
5775 self._implicit_libc_deps(mergelist,
5776 self._dynamic_config._scheduler_graph)
5778 # Break DepPriority.satisfied attributes which reference
5779 # installed Package instances.
5780 for parents, children, node in \
5781 self._dynamic_config._scheduler_graph.nodes.values():
5782 for priorities in chain(parents.values(), children.values()):
5783 for priority in priorities:
5784 if priority.satisfied:
# Deliberately replaces the Package reference with the
# bare truthy value True so the Package can be GC'd.
5785 priority.satisfied = True
# Prune the package cache down to packages the scheduler can actually
# encounter: those in the graph, or installed and still in vartree.
5787 pkg_cache = self._frozen_config._pkg_cache
5788 graph = self._dynamic_config._scheduler_graph
5789 trees = self._frozen_config.trees
5790 pruned_pkg_cache = {}
5791 for key, pkg in pkg_cache.items():
5792 if pkg in graph or \
5793 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
5794 pruned_pkg_cache[key] = pkg
5797 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
5801 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
5805 def break_refs(self):
# NOTE(review): the docstring delimiters are missing from this dump —
# confirm against the original file.
5807 Break any references in Package instances that lead back to the depgraph.
5808 This is useful if you want to hold references to packages without also
5809 holding the depgraph on the heap. It should only be called after the
5810 depgraph and _frozen_config will not be used for any more calculations.
# Overwrite each original RootConfig in _trees_orig with the depgraph's
# own instance, collapsing the two objects into one so the duplicate
# (and whatever it references) can be garbage collected.
5812 for root_config in self._frozen_config.roots.values():
5813 root_config.update(self._frozen_config._trees_orig[
5814 root_config.root]["root_config"])
5815 # Both instances are now identical, so discard the
5816 # original which should have no other references.
5817 self._frozen_config._trees_orig[
5818 root_config.root]["root_config"] = root_config
5820 def _resolve_conflicts(self):
# Conflict-resolution driver run by altlist() before serialization:
# optionally force "complete" mode, complete the graph, process slot
# conflicts / slot-operator reinstalls, then validate blockers.
# NOTE(review): some guard lines (e.g. around the _complete_graph()
# call) are missing from this dump — confirm against the original file.
5822 if "complete" not in self._dynamic_config.myparams and \
5823 self._dynamic_config._allow_backtracking and \
5824 self._dynamic_config._slot_collision_nodes and \
5825 not self._accept_blocker_conflicts():
# Slot collisions with backtracking available: switch to complete
# mode so the whole graph gets validated.
5826 self._dynamic_config.myparams["complete"] = True
5828 if not self._complete_graph():
5829 raise self._unknown_internal_error()
5831 self._process_slot_conflicts()
5833 self._slot_operator_trigger_reinstalls()
5835 if not self._validate_blockers():
5836 # Blockers don't trigger the _skip_restart flag, since
5837 # backtracking may solve blockers when it solves slot
5838 # conflicts (or by blind luck).
5839 raise self._unknown_internal_error()
5841 def _serialize_tasks(self):
# Serialize the pruned dependency graph into an ordered list of merge /
# uninstall tasks (retlist) plus a scheduler graph.  The algorithm
# repeatedly selects "leaf" nodes under progressively looser
# dependency-priority criteria, merges runtime-dependency cycles as
# groups, and schedules Uninstall tasks so they execute only after the
# packages that block them have been merged on top of them.
# NOTE(review): this excerpt is an elided listing — some original lines
# (loop headers such as "while True:"/"while mygraph:", try: headers,
# break/continue statements and blank lines) are missing from view.
5843 debug = "--debug" in self._frozen_config.myopts
# Dump the full digraph up front when debugging (guarded by an elided
# "if debug:" line in the original).
5846 writemsg("\ndigraph:\n\n", noiselevel=-1)
5847 self._dynamic_config.digraph.debug_print()
5848 writemsg("\n", noiselevel=-1)
# scheduler_graph keeps the complete edge set for the Scheduler, while
# mygraph below is progressively pruned during serialization.
5850 scheduler_graph = self._dynamic_config.digraph.copy()
5852 if '--nodeps' in self._frozen_config.myopts:
5853 # Preserve the package order given on the command line.
5854 return ([node for node in scheduler_graph \
5855 if isinstance(node, Package) \
5856 and node.operation == 'merge'], scheduler_graph)
5858 mygraph=self._dynamic_config.digraph.copy()
5860 removed_nodes = set()
5862 # Prune off all DependencyArg instances since they aren't
5863 # needed, and because of nested sets this is faster than doing
5864 # it with multiple digraph.root_nodes() calls below. This also
5865 # takes care of nested sets that have circular references,
5866 # which wouldn't be matched by digraph.root_nodes().
5867 for node in mygraph:
5868 if isinstance(node, DependencyArg):
5869 removed_nodes.add(node)
5871 mygraph.difference_update(removed_nodes)
5872 removed_nodes.clear()
5874 # Prune "nomerge" root nodes if nothing depends on them, since
5875 # otherwise they slow down merge order calculation. Don't remove
5876 # non-root nodes since they help optimize merge order in some cases
5877 # such as revdep-rebuild.
# NOTE(review): the enclosing "while True:" loop header is elided here;
# the loop repeats until no more prunable root nodes remain.
5880 for node in mygraph.root_nodes():
5881 if not isinstance(node, Package) or \
5882 node.installed or node.onlydeps:
5883 removed_nodes.add(node)
5885 self._spinner_update()
5886 mygraph.difference_update(removed_nodes)
5887 if not removed_nodes:
# (elided "break" terminates the pruning loop here)
5889 removed_nodes.clear()
5890 self._merge_order_bias(mygraph)
# Comparator used with cmp_sort_key() to order nodes inside a cycle.
5891 def cmp_circular_bias(n1, n2):
5893 RDEPEND is stronger than PDEPEND and this function
5894 measures such a strength bias within a circular
5895 dependency relationship.
5897 n1_n2_medium = n2 in mygraph.child_nodes(n1,
5898 ignore_priority=priority_range.ignore_medium_soft)
5899 n2_n1_medium = n1 in mygraph.child_nodes(n2,
5900 ignore_priority=priority_range.ignore_medium_soft)
5901 if n1_n2_medium == n2_n1_medium:
# (elided return statements resolve the comparison result)
5906 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
5908 # Contains uninstall tasks that have been scheduled to
5909 # occur after overlapping blockers have been installed.
5910 scheduled_uninstalls = set()
5911 # Contains any Uninstall tasks that have been ignored
5912 # in order to avoid the circular deps code path. These
5913 # correspond to blocker conflicts that could not be
5915 ignored_uninstall_tasks = set()
5916 have_uninstall_task = False
5917 complete = "complete" in self._dynamic_config.myparams
5920 def get_nodes(**kwargs):
5922 Returns leaf nodes excluding Uninstall instances
5923 since those should be executed as late as possible.
5925 return [node for node in mygraph.leaf_nodes(**kwargs) \
5926 if isinstance(node, Package) and \
5927 (node.operation != "uninstall" or \
5928 node in scheduled_uninstalls)]
5930 # sys-apps/portage needs special treatment if ROOT="/"
5931 running_root = self._frozen_config._running_root.root
5932 runtime_deps = InternalPackageSet(
5933 initial_atoms=[PORTAGE_PACKAGE_ATOM])
5934 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
5935 PORTAGE_PACKAGE_ATOM)
5936 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
5937 PORTAGE_PACKAGE_ATOM)
# Collapse the match lists down to a single package (or None); the
# "if running_portage:" / "else:" lines are elided from this listing.
5940 running_portage = running_portage[0]
5942 running_portage = None
5944 if replacement_portage:
5945 replacement_portage = replacement_portage[0]
5947 replacement_portage = None
5949 if replacement_portage == running_portage:
5950 replacement_portage = None
5952 if running_portage is not None:
# (elided "try:" guards the RDEPEND parse below)
5954 portage_rdepend = self._select_atoms_highest_available(
5955 running_root, running_portage._metadata["RDEPEND"],
5956 myuse=self._pkg_use_enabled(running_portage),
5957 parent=running_portage, strict=False)
5958 except portage.exception.InvalidDependString as e:
5959 portage.writemsg("!!! Invalid RDEPEND in " + \
5960 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
5961 (running_root, running_portage.cpv, e), noiselevel=-1)
5963 portage_rdepend = {running_portage : []}
5964 for atoms in portage_rdepend.values():
5965 runtime_deps.update(atom for atom in atoms \
5966 if not atom.blocker)
5968 # Merge libc asap, in order to account for implicit
5969 # dependencies. See bug #303567.
5970 implicit_libc_roots = (running_root,)
5971 for root in implicit_libc_roots:
# (elided "libc_pkgs = set()" style initialization precedes this)
5973 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5974 graphdb = self._dynamic_config.mydbapi[root]
5975 for atom in self._expand_virt_from_graph(root,
5976 portage.const.LIBC_PACKAGE_ATOM):
# (elided blocker check / "continue" lines here)
5979 match = graphdb.match_pkgs(atom)
5983 if pkg.operation == "merge" and \
5984 not vardb.cpv_exists(pkg.cpv):
# (elided: collect pkg into the set of libc packages to merge asap)
5988 # If there's also an os-headers upgrade, we need to
5989 # pull that in first. See bug #328317.
5990 for atom in self._expand_virt_from_graph(root,
5991 portage.const.OS_HEADERS_PACKAGE_ATOM):
5994 match = graphdb.match_pkgs(atom)
5998 if pkg.operation == "merge" and \
5999 not vardb.cpv_exists(pkg.cpv):
6000 asap_nodes.append(pkg)
6002 asap_nodes.extend(libc_pkgs)
6004 def gather_deps(ignore_priority, mergeable_nodes,
6005 selected_nodes, node):
6007 Recursively gather a group of nodes that RDEPEND on
6008 eachother. This ensures that they are merged as a group
6009 and get their RDEPENDs satisfied as soon as possible.
6011 if node in selected_nodes:
# (elided "return True" — node already gathered)
6013 if node not in mergeable_nodes:
# (elided "return False" — node cannot join this group)
6015 if node == replacement_portage and \
6016 mygraph.child_nodes(node,
6017 ignore_priority=priority_range.ignore_medium_soft):
6018 # Make sure that portage always has all of it's
6019 # RDEPENDs installed first.
6021 selected_nodes.add(node)
6022 for child in mygraph.child_nodes(node,
6023 ignore_priority=ignore_priority):
6024 if not gather_deps(ignore_priority,
6025 mergeable_nodes, selected_nodes, child):
# (elided "return False" / final "return True" lines)
# Priority filters that additionally treat blocker-uninstall edges as
# ignorable, so scheduled uninstalls don't block node selection.
6029 def ignore_uninst_or_med(priority):
6030 if priority is BlockerDepPriority.instance:
6032 return priority_range.ignore_medium(priority)
6034 def ignore_uninst_or_med_soft(priority):
6035 if priority is BlockerDepPriority.instance:
6037 return priority_range.ignore_medium_soft(priority)
6039 tree_mode = "--tree" in self._frozen_config.myopts
6040 # Tracks whether or not the current iteration should prefer asap_nodes
6041 # if available. This is set to False when the previous iteration
6042 # failed to select any nodes. It is reset whenever nodes are
6043 # successfully selected.
6046 # Controls whether or not the current iteration should drop edges that
6047 # are "satisfied" by installed packages, in order to solve circular
6048 # dependencies. The deep runtime dependencies of installed packages are
6049 # not checked in this case (bug #199856), so it must be avoided
6050 # whenever possible.
6051 drop_satisfied = False
6053 # State of variables for successive iterations that loosen the
6054 # criteria for node selection.
6056 # iteration prefer_asap drop_satisfied
6061 # If no nodes are selected on the last iteration, it is due to
6062 # unresolved blockers or circular dependencies.
# NOTE(review): the main selection-loop header (e.g. "while mygraph:")
# is elided from this listing; everything below through the
# "mygraph.difference_update(selected_nodes)" block iterates per pass.
6065 self._spinner_update()
6066 selected_nodes = None
6067 ignore_priority = None
6068 if drop_satisfied or (prefer_asap and asap_nodes):
6069 priority_range = DepPrioritySatisfiedRange
6071 priority_range = DepPriorityNormalRange
6072 if prefer_asap and asap_nodes:
6073 # ASAP nodes are merged before their soft deps. Go ahead and
6074 # select root nodes here if necessary, since it's typical for
6075 # the parent to have been removed from the graph already.
6076 asap_nodes = [node for node in asap_nodes \
6077 if mygraph.contains(node)]
6078 for i in range(priority_range.SOFT,
6079 priority_range.MEDIUM_SOFT + 1):
6080 ignore_priority = priority_range.ignore_priority[i]
6081 for node in asap_nodes:
6082 if not mygraph.child_nodes(node,
6083 ignore_priority=ignore_priority):
6084 selected_nodes = [node]
6085 asap_nodes.remove(node)
# (elided "break" statements exit both loops once a node is chosen)
6090 if not selected_nodes and \
6091 not (prefer_asap and asap_nodes):
6092 for i in range(priority_range.NONE,
6093 priority_range.MEDIUM_SOFT + 1):
6094 ignore_priority = priority_range.ignore_priority[i]
6095 nodes = get_nodes(ignore_priority=ignore_priority)
6097 # If there is a mixture of merges and uninstalls,
6098 # do the uninstalls first.
6099 good_uninstalls = None
6101 good_uninstalls = []
6103 if node.operation == "uninstall":
6104 good_uninstalls.append(node)
6107 nodes = good_uninstalls
6111 if good_uninstalls or len(nodes) == 1 or \
6112 (ignore_priority is None and \
6113 not asap_nodes and not tree_mode):
6114 # Greedily pop all of these nodes since no
6115 # relationship has been ignored. This optimization
6116 # destroys --tree output, so it's disabled in tree
6118 selected_nodes = nodes
6120 # For optimal merge order:
6121 # * Only pop one node.
6122 # * Removing a root node (node without a parent)
6123 # will not produce a leaf node, so avoid it.
6124 # * It's normal for a selected uninstall to be a
6125 # root node, so don't check them for parents.
6127 prefer_asap_parents = (True, False)
6129 prefer_asap_parents = (False,)
6130 for check_asap_parent in prefer_asap_parents:
6131 if check_asap_parent:
# (elided "for node in nodes:" loop header here)
6133 parents = mygraph.parent_nodes(node,
6134 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
6135 if any(x in asap_nodes for x in parents):
6136 selected_nodes = [node]
# (elided "break" lines; the non-asap branch follows)
6140 if mygraph.parent_nodes(node):
6141 selected_nodes = [node]
6148 if not selected_nodes:
6149 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
6151 mergeable_nodes = set(nodes)
6152 if prefer_asap and asap_nodes:
# (elided: restrict mergeable_nodes / nodes relative to asap set)
6154 # When gathering the nodes belonging to a runtime cycle,
6155 # we want to minimize the number of nodes gathered, since
6156 # this tends to produce a more optimal merge order.
6157 # Ignoring all medium_soft deps serves this purpose.
6158 # In the case of multiple runtime cycles, where some cycles
6159 # may depend on smaller independent cycles, it's optimal
6160 # to merge smaller independent cycles before other cycles
6161 # that depend on them. Therefore, we search for the
6162 # smallest cycle in order to try and identify and prefer
6163 # these smaller independent cycles.
6164 ignore_priority = priority_range.ignore_medium_soft
6165 smallest_cycle = None
# (elided "for node in nodes:" loop header here)
6167 if not mygraph.parent_nodes(node):
6169 selected_nodes = set()
6170 if gather_deps(ignore_priority,
6171 mergeable_nodes, selected_nodes, node):
6172 # When selecting asap_nodes, we need to ensure
6173 # that we haven't selected a large runtime cycle
6174 # that is obviously sub-optimal. This will be
6175 # obvious if any of the non-asap selected_nodes
6176 # is a leaf node when medium_soft deps are
6178 if prefer_asap and asap_nodes and \
6179 len(selected_nodes) > 1:
6180 for node in selected_nodes.difference(
6182 if not mygraph.child_nodes(node,
6184 DepPriorityNormalRange.ignore_medium_soft):
6185 selected_nodes = None
6188 if smallest_cycle is None or \
6189 len(selected_nodes) < len(smallest_cycle):
6190 smallest_cycle = selected_nodes
6192 selected_nodes = smallest_cycle
6194 if selected_nodes and debug:
6195 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
6196 (len(selected_nodes),), noiselevel=-1)
6197 cycle_digraph = mygraph.copy()
6198 cycle_digraph.difference_update([x for x in
6199 cycle_digraph if x not in selected_nodes])
6200 cycle_digraph.debug_print()
6201 writemsg("\n", noiselevel=-1)
6203 if prefer_asap and asap_nodes and not selected_nodes:
6204 # We failed to find any asap nodes to merge, so ignore
6205 # them for the next iteration.
# (elided: prefer_asap = False; continue)
6209 if selected_nodes and ignore_priority is not None:
6210 # Try to merge ignored medium_soft deps as soon as possible
6211 # if they're not satisfied by installed packages.
6212 for node in selected_nodes:
6213 children = set(mygraph.child_nodes(node))
6214 soft = children.difference(
6215 mygraph.child_nodes(node,
6216 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
6217 medium_soft = children.difference(
6218 mygraph.child_nodes(node,
6220 DepPrioritySatisfiedRange.ignore_medium_soft))
6221 medium_soft.difference_update(soft)
6222 for child in medium_soft:
6223 if child in selected_nodes:
6225 if child in asap_nodes:
# (elided "continue" lines for already-handled children)
6227 # Merge PDEPEND asap for bug #180045.
6228 asap_nodes.append(child)
6230 if selected_nodes and len(selected_nodes) > 1:
6231 if not isinstance(selected_nodes, list):
6232 selected_nodes = list(selected_nodes)
6233 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
6235 if not selected_nodes and myblocker_uninstalls:
6236 # An Uninstall task needs to be executed in order to
6237 # avoid conflict if possible.
6240 priority_range = DepPrioritySatisfiedRange
6242 priority_range = DepPriorityNormalRange
6244 mergeable_nodes = get_nodes(
6245 ignore_priority=ignore_uninst_or_med)
6247 min_parent_deps = None
# (elided "uninst_task = None" initialization here)
6250 for task in myblocker_uninstalls.leaf_nodes():
6251 # Do some sanity checks so that system or world packages
6252 # don't get uninstalled inappropriately here (only really
6253 # necessary when --complete-graph has not been enabled).
6255 if task in ignored_uninstall_tasks:
6258 if task in scheduled_uninstalls:
6259 # It's been scheduled but it hasn't
6260 # been executed yet due to dependence
6261 # on installation of blocking packages.
6264 root_config = self._frozen_config.roots[task.root]
6265 inst_pkg = self._pkg(task.cpv, "installed", root_config,
6268 if self._dynamic_config.digraph.contains(inst_pkg):
6271 forbid_overlap = False
6272 heuristic_overlap = False
6273 for blocker in myblocker_uninstalls.parent_nodes(task):
6274 if not eapi_has_strong_blocks(blocker.eapi):
6275 heuristic_overlap = True
6276 elif blocker.atom.blocker.overlap.forbid:
6277 forbid_overlap = True
6279 if forbid_overlap and running_root == task.root:
6282 if heuristic_overlap and running_root == task.root:
6283 # Never uninstall sys-apps/portage or it's essential
6284 # dependencies, except through replacement.
6286 runtime_dep_atoms = \
6287 list(runtime_deps.iterAtomsForPackage(task))
6288 except portage.exception.InvalidDependString as e:
6289 portage.writemsg("!!! Invalid PROVIDE in " + \
6290 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6291 (task.root, task.cpv, e), noiselevel=-1)
6295 # Don't uninstall a runtime dep if it appears
6296 # to be the only suitable one installed.
6298 vardb = root_config.trees["vartree"].dbapi
6299 for atom in runtime_dep_atoms:
6300 other_version = None
6301 for pkg in vardb.match_pkgs(atom):
6302 if pkg.cpv == task.cpv and \
6303 pkg.counter == task.counter:
6307 if other_version is None:
6313 # For packages in the system set, don't take
6314 # any chances. If the conflict can't be resolved
6315 # by a normal replacement operation then abort.
6318 for atom in root_config.sets[
6319 "system"].iterAtomsForPackage(task):
6322 except portage.exception.InvalidDependString as e:
6323 portage.writemsg("!!! Invalid PROVIDE in " + \
6324 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6325 (task.root, task.cpv, e), noiselevel=-1)
6331 # Note that the world check isn't always
6332 # necessary since self._complete_graph() will
6333 # add all packages from the system and world sets to the
6334 # graph. This just allows unresolved conflicts to be
6335 # detected as early as possible, which makes it possible
6336 # to avoid calling self._complete_graph() when it is
6337 # unnecessary due to blockers triggering an abortion.
6339 # For packages in the world set, go ahead an uninstall
6340 # when necessary, as long as the atom will be satisfied
6341 # in the final state.
6342 graph_db = self._dynamic_config.mydbapi[task.root]
6345 for atom in root_config.sets[
6346 "selected"].iterAtomsForPackage(task):
6348 for pkg in graph_db.match_pkgs(atom):
# (elided satisfaction checks; record the blocked world package below)
6355 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
6357 except portage.exception.InvalidDependString as e:
6358 portage.writemsg("!!! Invalid PROVIDE in " + \
6359 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6360 (task.root, task.cpv, e), noiselevel=-1)
6366 # Check the deps of parent nodes to ensure that
6367 # the chosen task produces a leaf node. Maybe
6368 # this can be optimized some more to make the
6369 # best possible choice, but the current algorithm
6370 # is simple and should be near optimal for most
6372 self._spinner_update()
6373 mergeable_parent = False
# (elided "parent_deps = set()" initialization here)
6375 parent_deps.add(task)
6376 for parent in mygraph.parent_nodes(task):
6377 parent_deps.update(mygraph.child_nodes(parent,
6378 ignore_priority=priority_range.ignore_medium_soft))
6379 if min_parent_deps is not None and \
6380 len(parent_deps) >= min_parent_deps:
6381 # This task is no better than a previously selected
6382 # task, so abort search now in order to avoid wasting
6383 # any more cpu time on this task. This increases
6384 # performance dramatically in cases when there are
6385 # hundreds of blockers to solve, like when
6386 # upgrading to a new slot of kde-meta.
6387 mergeable_parent = None
6389 if parent in mergeable_nodes and \
6390 gather_deps(ignore_uninst_or_med_soft,
6391 mergeable_nodes, set(), parent):
6392 mergeable_parent = True
6394 if not mergeable_parent:
6397 if min_parent_deps is None or \
6398 len(parent_deps) < min_parent_deps:
6399 min_parent_deps = len(parent_deps)
# (elided "uninst_task = task" assignment here)
6402 if uninst_task is not None and min_parent_deps == 1:
6403 # This is the best possible result, so so abort search
6404 # now in order to avoid wasting any more cpu time.
6407 if uninst_task is not None:
6408 # The uninstall is performed only after blocking
6409 # packages have been merged on top of it. File
6410 # collisions between blocking packages are detected
6411 # and removed from the list of files to be uninstalled.
6412 scheduled_uninstalls.add(uninst_task)
6413 parent_nodes = mygraph.parent_nodes(uninst_task)
6415 # Reverse the parent -> uninstall edges since we want
6416 # to do the uninstall after blocking packages have
6417 # been merged on top of it.
6418 mygraph.remove(uninst_task)
6419 for blocked_pkg in parent_nodes:
6420 mygraph.add(blocked_pkg, uninst_task,
6421 priority=BlockerDepPriority.instance)
6422 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6423 scheduler_graph.add(blocked_pkg, uninst_task,
6424 priority=BlockerDepPriority.instance)
6426 # Sometimes a merge node will render an uninstall
6427 # node unnecessary (due to occupying the same SLOT),
6428 # and we want to avoid executing a separate uninstall
6429 # task in that case.
6430 slot_node = self._dynamic_config.mydbapi[uninst_task.root
6431 ].match_pkgs(uninst_task.slot_atom)
6433 slot_node[0].operation == "merge":
6434 mygraph.add(slot_node[0], uninst_task,
6435 priority=BlockerDepPriority.instance)
6437 # Reset the state variables for leaf node selection and
6438 # continue trying to select leaf nodes.
6440 drop_satisfied = False
6443 if not selected_nodes:
6444 # Only select root nodes as a last resort. This case should
6445 # only trigger when the graph is nearly empty and the only
6446 # remaining nodes are isolated (no parents or children). Since
6447 # the nodes must be isolated, ignore_priority is not needed.
6448 selected_nodes = get_nodes()
6450 if not selected_nodes and not drop_satisfied:
6451 drop_satisfied = True
# (elided "continue" restarts the selection loop with looser criteria)
6454 if not selected_nodes and myblocker_uninstalls:
6455 # If possible, drop an uninstall task here in order to avoid
6456 # the circular deps code path. The corresponding blocker will
6457 # still be counted as an unresolved conflict.
6459 for node in myblocker_uninstalls.leaf_nodes():
# (elided "try:" around graph removal — node may already be gone)
6461 mygraph.remove(node)
6466 ignored_uninstall_tasks.add(node)
6469 if uninst_task is not None:
6470 # Reset the state variables for leaf node selection and
6471 # continue trying to select leaf nodes.
6473 drop_satisfied = False
6476 if not selected_nodes:
# No further relaxation possible: report the remaining cycle and abort.
6477 self._dynamic_config._circular_deps_for_display = mygraph
6478 self._dynamic_config._skip_restart = True
6479 raise self._unknown_internal_error()
6481 # At this point, we've succeeded in selecting one or more nodes, so
6482 # reset state variables for leaf node selection.
6484 drop_satisfied = False
6486 mygraph.difference_update(selected_nodes)
6488 for node in selected_nodes:
6489 if isinstance(node, Package) and \
6490 node.operation == "nomerge":
# (elided "continue" — nomerge nodes never enter the merge list)
6493 # Handle interactions between blockers
6494 # and uninstallation tasks.
6495 solved_blockers = set()
# (elided "uninst_task = None" reset here)
6497 if isinstance(node, Package) and \
6498 "uninstall" == node.operation:
6499 have_uninstall_task = True
# (elided "uninst_task = node" / else branch header here)
6502 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
6503 inst_pkg = vardb.match_pkgs(node.slot_atom)
6505 # The package will be replaced by this one, so remove
6506 # the corresponding Uninstall task if necessary.
6507 inst_pkg = inst_pkg[0]
6508 uninst_task = Package(built=inst_pkg.built,
6509 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6510 metadata=inst_pkg._metadata,
6511 operation="uninstall",
6512 root_config=inst_pkg.root_config,
6513 type_name=inst_pkg.type_name)
# (elided "try:" — the task may not be in the graph)
6515 mygraph.remove(uninst_task)
6519 if uninst_task is not None and \
6520 uninst_task not in ignored_uninstall_tasks and \
6521 myblocker_uninstalls.contains(uninst_task):
6522 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6523 myblocker_uninstalls.remove(uninst_task)
6524 # Discard any blockers that this Uninstall solves.
6525 for blocker in blocker_nodes:
6526 if not myblocker_uninstalls.child_nodes(blocker):
6527 myblocker_uninstalls.remove(blocker)
6529 self._dynamic_config._unsolvable_blockers:
6530 solved_blockers.add(blocker)
6532 retlist.append(node)
6534 if (isinstance(node, Package) and \
6535 "uninstall" == node.operation) or \
6536 (uninst_task is not None and \
6537 uninst_task in scheduled_uninstalls):
6538 # Include satisfied blockers in the merge list
6539 # since the user might be interested and also
6540 # it serves as an indicator that blocking packages
6541 # will be temporarily installed simultaneously.
6542 for blocker in solved_blockers:
6543 retlist.append(blocker)
6545 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
6546 for node in myblocker_uninstalls.root_nodes():
6547 unsolvable_blockers.add(node)
6549 # If any Uninstall tasks need to be executed in order
6550 # to avoid a conflict, complete the graph with any
6551 # dependencies that may have been initially
6552 # neglected (to ensure that unsafe Uninstall tasks
6553 # are properly identified and blocked from execution).
6554 if have_uninstall_task and \
6556 not unsolvable_blockers:
6557 self._dynamic_config.myparams["complete"] = True
6558 if '--debug' in self._frozen_config.myopts:
# (elided "msg = []" initialization here)
6560 msg.append("enabling 'complete' depgraph mode " + \
6561 "due to uninstall task(s):")
6563 for node in retlist:
6564 if isinstance(node, Package) and \
6565 node.operation == 'uninstall':
6566 msg.append("\t%s" % (node,))
6567 writemsg_level("\n%s\n" % \
6568 "".join("%s\n" % line for line in msg),
6569 level=logging.DEBUG, noiselevel=-1)
6570 raise self._serialize_tasks_retry("")
6572 # Set satisfied state on blockers, but not before the
6573 # above retry path, since we don't want to modify the
6574 # state in that case.
6575 for node in retlist:
6576 if isinstance(node, Blocker):
6577 node.satisfied = True
6579 for blocker in unsolvable_blockers:
6580 retlist.append(blocker)
6582 retlist = tuple(retlist)
6584 if unsolvable_blockers and \
6585 not self._accept_blocker_conflicts():
6586 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
6587 self._dynamic_config._serialized_tasks_cache = retlist
6588 self._dynamic_config._scheduler_graph = scheduler_graph
6589 # Blockers don't trigger the _skip_restart flag, since
6590 # backtracking may solve blockers when it solves slot
6591 # conflicts (or by blind luck).
6592 raise self._unknown_internal_error()
6594 if self._dynamic_config._slot_collision_info and \
6595 not self._accept_blocker_conflicts():
6596 self._dynamic_config._serialized_tasks_cache = retlist
6597 self._dynamic_config._scheduler_graph = scheduler_graph
6598 raise self._unknown_internal_error()
6600 return retlist, scheduler_graph
6602 def _show_circular_deps(self, mygraph):
# Display a circular-dependency error for *mygraph*: force verbose
# --tree output, show the cycle via circular_dependency_handler, and
# print any USE-flag change suggestions that could break the cycle.
# NOTE(review): this excerpt is elided — a few original lines (e.g.
# noiselevel continuation for the "Error:" message, else branches and
# blank lines) are missing from view.
6603 self._dynamic_config._circular_dependency_handler = \
6604 circular_dependency_handler(self, mygraph)
6605 handler = self._dynamic_config._circular_dependency_handler
# Temporarily maximize output detail so the cycle is visible.
6607 self._frozen_config.myopts.pop("--quiet", None)
6608 self._frozen_config.myopts["--verbose"] = True
6609 self._frozen_config.myopts["--tree"] = True
6610 portage.writemsg("\n\n", noiselevel=-1)
6611 self.display(handler.merge_list)
6612 prefix = colorize("BAD", " * ")
6613 portage.writemsg("\n", noiselevel=-1)
6614 portage.writemsg(prefix + "Error: circular dependencies:\n",
6616 portage.writemsg("\n", noiselevel=-1)
6618 if handler.circular_dep_message is None:
# No pre-rendered message: fall back to the handler's debug dump.
6619 handler.debug_print()
6620 portage.writemsg("\n", noiselevel=-1)
6622 if handler.circular_dep_message is not None:
6623 portage.writemsg(handler.circular_dep_message, noiselevel=-1)
6625 suggestions = handler.suggestions
# (an elided "if suggestions:" guard precedes this output block)
6627 writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
6628 if len(suggestions) == 1:
6629 writemsg("by applying the following change:\n", noiselevel=-1)
6631 writemsg("by applying " + colorize("bold", "any of") + \
6632 " the following changes:\n", noiselevel=-1)
6633 writemsg("".join(suggestions), noiselevel=-1)
6634 writemsg("\nNote that this change can be reverted, once the package has" + \
6635 " been installed.\n", noiselevel=-1)
6636 if handler.large_cycle_count:
6637 writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
6638 "Several changes might be required to resolve all cycles.\n" + \
6639 "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
# Generic fallback advice when no concrete suggestions were computed.
6641 writemsg("\n\n", noiselevel=-1)
6642 writemsg(prefix + "Note that circular dependencies " + \
6643 "can often be avoided by temporarily\n", noiselevel=-1)
6644 writemsg(prefix + "disabling USE flags that trigger " + \
6645 "optional dependencies.\n", noiselevel=-1)
6647 def _show_merge_list(self):
6648 if self._dynamic_config._serialized_tasks_cache is not None and \
6649 not (self._dynamic_config._displayed_list is not None and \
6650 (self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache or \
6651 self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
6652 self._dynamic_config._displayed_list == \
6653 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
6654 display_list = self._dynamic_config._serialized_tasks_cache
6655 if "--tree" in self._frozen_config.myopts:
6656 display_list = tuple(reversed(display_list))
6657 self.display(display_list)
6659 def _show_unsatisfied_blockers(self, blockers):
# Print an error for *blockers* that could not be resolved: show the
# merge list, then for each blocked/blocking package show which parent
# packages (and atoms) pulled it into the graph, pruning packages that
# are only pulled in by other conflict packages to reduce noise.
# NOTE(review): this excerpt is elided — initializations such as
# conflict_pkgs/pruned_pkgs/msg/indent and several break/continue
# lines are missing from view.
6660 self._show_merge_list()
6661 msg = "Error: The above package list contains " + \
6662 "packages which cannot be installed " + \
6663 "at the same time on the same system."
6664 prefix = colorize("BAD", " * ")
6665 portage.writemsg("\n", noiselevel=-1)
6666 for line in textwrap.wrap(msg, 70):
6667 portage.writemsg(prefix + line + "\n", noiselevel=-1)
6669 # Display the conflicting packages along with the packages
6670 # that pulled them in. This is helpful for troubleshooting
6671 # cases in which blockers don't solve automatically and
6672 # the reasons are not apparent from the normal merge list
6676 for blocker in blockers:
6677 for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
6678 self._dynamic_config._blocker_parents.parent_nodes(blocker)):
6679 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
6680 if not parent_atoms:
# Fall back to the world-set atom recorded during serialization.
6681 atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
6682 if atom is not None:
6683 parent_atoms = set([("@selected", atom)])
6685 conflict_pkgs[pkg] = parent_atoms
6688 # Reduce noise by pruning packages that are only
6689 # pulled in by other conflict packages.
6691 for pkg, parent_atoms in conflict_pkgs.items():
6692 relevant_parent = False
6693 for parent, atom in parent_atoms:
6694 if parent not in conflict_pkgs:
6695 relevant_parent = True
# (elided "break" once a relevant parent is found)
6697 if not relevant_parent:
6698 pruned_pkgs.add(pkg)
6699 for pkg in pruned_pkgs:
6700 del conflict_pkgs[pkg]
6706 for pkg, parent_atoms in conflict_pkgs.items():
6708 # Prefer packages that are not directly involved in a conflict.
6709 # It can be essential to see all the packages here, so don't
6710 # omit any. If the list is long, people can simply use a pager.
6711 preferred_parents = set()
6712 for parent_atom in parent_atoms:
6713 parent, atom = parent_atom
6714 if parent not in conflict_pkgs:
6715 preferred_parents.add(parent_atom)
6717 ordered_list = list(preferred_parents)
6718 if len(parent_atoms) > len(ordered_list):
# Append the remaining (conflict-involved) parents afterwards.
6719 for parent_atom in parent_atoms:
6720 if parent_atom not in preferred_parents:
6721 ordered_list.append(parent_atom)
6723 msg.append(indent + "%s pulled in by\n" % pkg)
6725 for parent_atom in ordered_list:
6726 parent, atom = parent_atom
6727 msg.append(2*indent)
6728 if isinstance(parent,
6729 (PackageArg, AtomArg)):
6730 # For PackageArg and AtomArg types, it's
6731 # redundant to display the atom attribute.
6732 msg.append(str(parent))
# (elided else branch header here)
6734 # Display the specific atom from SetArg or
6736 msg.append("%s required by %s" % (atom, parent))
6741 writemsg("".join(msg), noiselevel=-1)
6743 if "--quiet" not in self._frozen_config.myopts:
6744 show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
	"""Display the merge list *mylist*.

	@param mylist: sequence of tasks (Package/Blocker) to display
	@param favorites: favorite atoms passed through to the output
		routine; defaults to an empty list.  A None sentinel is used
		instead of a mutable [] default so a single list object is
		never shared across calls.
	@param verbosity: optional verbosity override, passed through
	@return: whatever the resolver output routine returns
	"""
	if favorites is None:
		favorites = []
	# This is used to prevent display_problems() from
	# redundantly displaying this exact same merge list
	# again via _show_merge_list().
	self._dynamic_config._displayed_list = mylist
	# 'display' below resolves to the module-level function (imported
	# from the resolver output module), not to this method — inside a
	# class body a bare name never refers to a sibling method.
	return display(self, mylist, favorites, verbosity)
6756 def _display_autounmask(self):
6758 Display --autounmask message and optionally write it to config files
6759 (using CONFIG_PROTECT). The message includes the comments and the changes.
6762 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
6763 autounmask_unrestricted_atoms = \
6764 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
6765 quiet = "--quiet" in self._frozen_config.myopts
6766 pretend = "--pretend" in self._frozen_config.myopts
6767 ask = "--ask" in self._frozen_config.myopts
6768 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
6770 def check_if_latest(pkg):
6772 is_latest_in_slot = True
6773 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
6774 root_config = self._frozen_config.roots[pkg.root]
6776 for db, pkg_type, built, installed, db_keys in dbs:
6777 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
6778 if other_pkg.cp != pkg.cp:
6779 # old-style PROVIDE virtual means there are no
6780 # normal matches for this pkg_type
6784 if other_pkg.slot_atom == pkg.slot_atom:
6785 is_latest_in_slot = False
6788 # iter_match_pkgs yields highest version first, so
6789 # there's no need to search this pkg_type any further
6792 if not is_latest_in_slot:
6795 return is_latest, is_latest_in_slot
6797 #Set of roots we have autounmask changes for.
6800 masked_by_missing_keywords = False
6801 unstable_keyword_msg = {}
6802 for pkg in self._dynamic_config._needed_unstable_keywords:
6803 self._show_merge_list()
6804 if pkg in self._dynamic_config.digraph:
6807 unstable_keyword_msg.setdefault(root, [])
6808 is_latest, is_latest_in_slot = check_if_latest(pkg)
6809 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6810 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6811 use=self._pkg_use_enabled(pkg))
6812 for reason in mreasons:
6813 if reason.unmask_hint and \
6814 reason.unmask_hint.key == 'unstable keyword':
6815 keyword = reason.unmask_hint.value
6817 masked_by_missing_keywords = True
6819 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
6820 if autounmask_unrestricted_atoms:
6822 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
6823 elif is_latest_in_slot:
6824 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
6826 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6828 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6830 p_mask_change_msg = {}
6831 for pkg in self._dynamic_config._needed_p_mask_changes:
6832 self._show_merge_list()
6833 if pkg in self._dynamic_config.digraph:
6836 p_mask_change_msg.setdefault(root, [])
6837 is_latest, is_latest_in_slot = check_if_latest(pkg)
6838 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6839 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6840 use=self._pkg_use_enabled(pkg))
6841 for reason in mreasons:
6842 if reason.unmask_hint and \
6843 reason.unmask_hint.key == 'p_mask':
6844 keyword = reason.unmask_hint.value
6846 comment, filename = portage.getmaskingreason(
6847 pkg.cpv, metadata=pkg._metadata,
6848 settings=pkgsettings,
6849 portdb=pkg.root_config.trees["porttree"].dbapi,
6850 return_location=True)
6852 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
6854 p_mask_change_msg[root].append("# %s:\n" % filename)
6856 comment = [line for line in
6857 comment.splitlines() if line]
6858 for line in comment:
6859 p_mask_change_msg[root].append("%s\n" % line)
6860 if autounmask_unrestricted_atoms:
6862 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
6863 elif is_latest_in_slot:
6864 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
6866 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6868 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6870 use_changes_msg = {}
6871 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
6872 self._show_merge_list()
6873 if pkg in self._dynamic_config.digraph:
6876 use_changes_msg.setdefault(root, [])
6877 is_latest, is_latest_in_slot = check_if_latest(pkg)
6878 changes = needed_use_config_change[1]
6880 for flag, state in changes.items():
6882 adjustments.append(flag)
6884 adjustments.append("-" + flag)
6885 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
6887 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6888 elif is_latest_in_slot:
6889 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
6891 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6894 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
6895 self._show_merge_list()
6896 if pkg in self._dynamic_config.digraph:
6899 license_msg.setdefault(root, [])
6900 is_latest, is_latest_in_slot = check_if_latest(pkg)
6902 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
6904 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6905 elif is_latest_in_slot:
6906 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
6908 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6910 def find_config_file(abs_user_config, file_name):
6912 Searches /etc/portage for an appropriate file to append changes to.
6913 If the file_name is a file it is returned, if it is a directory, the
6914 last file in it is returned. Order of traversal is the identical to
6915 portage.util.grablines(recursive=True).
6917 file_name - String containing a file name like "package.use"
6918 return value - String. Absolute path of file to write to. None if
6919 no suitable file exists.
6921 file_path = os.path.join(abs_user_config, file_name)
6925 except OSError as e:
6926 if e.errno == errno.ENOENT:
6927 # The file doesn't exist, so we'll
6931 # Disk or file system trouble?
6934 last_file_path = None
6943 if stat.S_ISREG(st.st_mode):
6945 elif stat.S_ISDIR(st.st_mode):
6946 if os.path.basename(p) in VCS_DIRS:
6949 contents = os.listdir(p)
6953 contents.sort(reverse=True)
6954 for child in contents:
6955 if child.startswith(".") or \
6956 child.endswith("~"):
6958 stack.append(os.path.join(p, child))
6960 return last_file_path
6962 write_to_file = autounmask_write and not pretend
6963 #Make sure we have a file to write to before doing any write.
6964 file_to_write_to = {}
6968 settings = self._frozen_config.roots[root].settings
6969 abs_user_config = os.path.join(
6970 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6972 if root in unstable_keyword_msg:
6973 if not os.path.exists(os.path.join(abs_user_config,
6974 "package.keywords")):
6975 filename = "package.accept_keywords"
6977 filename = "package.keywords"
6978 file_to_write_to[(abs_user_config, "package.keywords")] = \
6979 find_config_file(abs_user_config, filename)
6981 if root in p_mask_change_msg:
6982 file_to_write_to[(abs_user_config, "package.unmask")] = \
6983 find_config_file(abs_user_config, "package.unmask")
6985 if root in use_changes_msg:
6986 file_to_write_to[(abs_user_config, "package.use")] = \
6987 find_config_file(abs_user_config, "package.use")
6989 if root in license_msg:
6990 file_to_write_to[(abs_user_config, "package.license")] = \
6991 find_config_file(abs_user_config, "package.license")
6993 for (abs_user_config, f), path in file_to_write_to.items():
6995 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
6997 write_to_file = not problems
6999 def format_msg(lines):
7001 for i, line in enumerate(lines):
7002 if line.startswith("#"):
7004 lines[i] = colorize("INFORM", line.rstrip()) + "\n"
7005 return "".join(lines)
7008 settings = self._frozen_config.roots[root].settings
7009 abs_user_config = os.path.join(
7010 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
7013 writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
def _writemsg(reason, file):
	# Emit a highlighted header naming the kind of required config
	# change and the portage(5) file that documents its syntax.
	text = ('\nThe following %s are necessary to proceed:\n'
		' (see "%s" in the portage(5) man page for more details)\n')
	writemsg(text % (colorize('BAD', reason), file), noiselevel=-1)
7020 if root in unstable_keyword_msg:
7021 _writemsg('keyword changes', 'package.accept_keywords')
7022 writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
7024 if root in p_mask_change_msg:
7025 _writemsg('mask changes', 'package.unmask')
7026 writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
7028 if root in use_changes_msg:
7029 _writemsg('USE changes', 'package.use')
7030 writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
7032 if root in license_msg:
7033 _writemsg('license changes', 'package.license')
7034 writemsg(format_msg(license_msg[root]), noiselevel=-1)
7039 settings = self._frozen_config.roots[root].settings
7040 protect_obj[root] = ConfigProtect(settings["EROOT"], \
7041 shlex_split(settings.get("CONFIG_PROTECT", "")),
7042 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
7044 def write_changes(root, changes, file_to_write_to):
7045 file_contents = None
7048 _unicode_encode(file_to_write_to,
7049 encoding=_encodings['fs'], errors='strict'),
7050 mode='r', encoding=_encodings['content'],
7051 errors='replace') as f:
7052 file_contents = f.readlines()
7053 except IOError as e:
7054 if e.errno == errno.ENOENT:
7057 problems.append("!!! Failed to read '%s': %s\n" % \
7058 (file_to_write_to, e))
7059 if file_contents is not None:
7060 file_contents.extend(changes)
7061 if protect_obj[root].isprotected(file_to_write_to):
7062 # We want to force new_protect_filename to ensure
7063 # that the user will see all our changes via
7064 # dispatch-conf, even if file_to_write_to doesn't
7065 # exist yet, so we specify force=True.
7066 file_to_write_to = new_protect_filename(file_to_write_to,
7069 write_atomic(file_to_write_to, "".join(file_contents))
7070 except PortageException:
7071 problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
7073 if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
7076 "NOTE: The --autounmask-keep-masks option will prevent emerge",
7077 " from creating package.unmask or ** keyword changes."
7081 line = colorize("INFORM", line)
7082 writemsg(line + "\n", noiselevel=-1)
7084 if ask and write_to_file and file_to_write_to:
7085 prompt = "\nWould you like to add these " + \
7086 "changes to your config files?"
7087 if userquery(prompt, enter_invalid) == 'No':
7088 write_to_file = False
7090 if write_to_file and file_to_write_to:
7092 settings = self._frozen_config.roots[root].settings
7093 abs_user_config = os.path.join(
7094 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
7095 ensure_dirs(abs_user_config)
7097 if root in unstable_keyword_msg:
7098 write_changes(root, unstable_keyword_msg[root],
7099 file_to_write_to.get((abs_user_config, "package.keywords")))
7101 if root in p_mask_change_msg:
7102 write_changes(root, p_mask_change_msg[root],
7103 file_to_write_to.get((abs_user_config, "package.unmask")))
7105 if root in use_changes_msg:
7106 write_changes(root, use_changes_msg[root],
7107 file_to_write_to.get((abs_user_config, "package.use")))
7109 if root in license_msg:
7110 write_changes(root, license_msg[root],
7111 file_to_write_to.get((abs_user_config, "package.license")))
7114 writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
7116 writemsg("".join(problems), noiselevel=-1)
7117 elif write_to_file and roots:
7118 writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
7120 elif not pretend and not autounmask_write and roots:
7121 writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
7122 "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
7123 "paying special attention to mask or keyword changes that may expose\n"
7124 "experimental or unstable packages.\n",
7128 def display_problems(self):
7130 Display problems with the dependency graph such as slot collisions.
7131 This is called internally by display() to show the problems _after_
7132 the merge list where it is most likely to be seen, but if display()
7133 is not going to be called then this method should be called explicitly
7134 to ensure that the user is notified of problems with the graph.
7137 if self._dynamic_config._circular_deps_for_display is not None:
7138 self._show_circular_deps(
7139 self._dynamic_config._circular_deps_for_display)
7141 # The slot conflict display has better noise reduction than
7142 # the unsatisfied blockers display, so skip unsatisfied blockers
7143 # display if there are slot conflicts (see bug #385391).
7144 if self._dynamic_config._slot_collision_info:
7145 self._show_slot_collision_notice()
7146 elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
7147 self._show_unsatisfied_blockers(
7148 self._dynamic_config._unsatisfied_blockers_for_display)
7150 self._show_missed_update()
7152 self._show_ignored_binaries()
7154 self._display_autounmask()
7156 for depgraph_sets in self._dynamic_config.sets.values():
7157 for pset in depgraph_sets.sets.values():
7158 for error_msg in pset.errors:
7159 writemsg_level("%s\n" % (error_msg,),
7160 level=logging.ERROR, noiselevel=-1)
7162 # TODO: Add generic support for "set problem" handlers so that
7163 # the below warnings aren't special cases for world only.
7165 if self._dynamic_config._missing_args:
7166 world_problems = False
7167 if "world" in self._dynamic_config.sets[
7168 self._frozen_config.target_root].sets:
7169 # Filter out indirect members of world (from nested sets)
7170 # since only direct members of world are desired here.
7171 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
7172 for arg, atom in self._dynamic_config._missing_args:
7173 if arg.name in ("selected", "world") and atom in world_set:
7174 world_problems = True
7178 sys.stderr.write("\n!!! Problems have been " + \
7179 "detected with your world file\n")
7180 sys.stderr.write("!!! Please run " + \
7181 green("emaint --check world")+"\n\n")
7183 if self._dynamic_config._missing_args:
7184 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7185 " Ebuilds for the following packages are either all\n")
7186 sys.stderr.write(colorize("BAD", "!!!") + \
7187 " masked or don't exist:\n")
7188 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7189 self._dynamic_config._missing_args) + "\n")
7191 if self._dynamic_config._pprovided_args:
7193 for arg, atom in self._dynamic_config._pprovided_args:
7194 if isinstance(arg, SetArg):
7196 arg_atom = (atom, atom)
7199 arg_atom = (arg.arg, atom)
7200 refs = arg_refs.setdefault(arg_atom, [])
7201 if parent not in refs:
7204 msg.append(bad("\nWARNING: "))
7205 if len(self._dynamic_config._pprovided_args) > 1:
7206 msg.append("Requested packages will not be " + \
7207 "merged because they are listed in\n")
7209 msg.append("A requested package will not be " + \
7210 "merged because it is listed in\n")
7211 msg.append("package.provided:\n\n")
7212 problems_sets = set()
7213 for (arg, atom), refs in arg_refs.items():
7216 problems_sets.update(refs)
7218 ref_string = ", ".join(["'%s'" % name for name in refs])
7219 ref_string = " pulled in by " + ref_string
7220 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7222 if "selected" in problems_sets or "world" in problems_sets:
7223 msg.append("This problem can be solved in one of the following ways:\n\n")
7224 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
7225 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
7226 msg.append(" C) Remove offending entries from package.provided.\n\n")
7227 msg.append("The best course of action depends on the reason that an offending\n")
7228 msg.append("package.provided entry exists.\n\n")
7229 sys.stderr.write("".join(msg))
7231 masked_packages = []
7232 for pkg in self._dynamic_config._masked_license_updates:
7233 root_config = pkg.root_config
7234 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
7235 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
7236 masked_packages.append((root_config, pkgsettings,
7237 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7239 writemsg("\n" + colorize("BAD", "!!!") + \
7240 " The following updates are masked by LICENSE changes:\n",
7242 show_masked_packages(masked_packages)
7244 writemsg("\n", noiselevel=-1)
7246 masked_packages = []
7247 for pkg in self._dynamic_config._masked_installed:
7248 root_config = pkg.root_config
7249 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
7250 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
7251 masked_packages.append((root_config, pkgsettings,
7252 pkg.cpv, pkg.repo, pkg._metadata, mreasons))
7254 writemsg("\n" + colorize("BAD", "!!!") + \
7255 " The following installed packages are masked:\n",
7257 show_masked_packages(masked_packages)
7259 writemsg("\n", noiselevel=-1)
7261 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7262 self._show_unsatisfied_dep(*pargs,
7263 **portage._native_kwargs(kwargs))
7265 if self._dynamic_config._buildpkgonly_deps_unsatisfied:
7266 self._show_merge_list()
7267 writemsg("\n!!! --buildpkgonly requires all "
7268 "dependencies to be merged.\n", noiselevel=-1)
7269 writemsg("!!! Cannot merge requested packages. "
7270 "Merge deps and try again.\n\n", noiselevel=-1)
7272 def saveNomergeFavorites(self):
7273 """Find atoms in favorites that are not in the mergelist and add them
7274 to the world file if necessary."""
7275 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7276 "--oneshot", "--onlydeps", "--pretend"):
7277 if x in self._frozen_config.myopts:
7279 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7280 world_set = root_config.sets["selected"]
7282 world_locked = False
7283 if hasattr(world_set, "lock"):
7287 if hasattr(world_set, "load"):
7288 world_set.load() # maybe it's changed on disk
7290 args_set = self._dynamic_config.sets[
7291 self._frozen_config.target_root].sets['__non_set_args__']
7292 added_favorites = set()
7293 for x in self._dynamic_config._set_nodes:
7294 if x.operation != "nomerge":
7297 if x.root != root_config.root:
7301 myfavkey = create_world_atom(x, args_set, root_config)
7303 if myfavkey in added_favorites:
7305 added_favorites.add(myfavkey)
7306 except portage.exception.InvalidDependString as e:
7307 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7308 (x.cpv, e), noiselevel=-1)
7309 writemsg("!!! see '%s'\n\n" % os.path.join(
7310 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
7313 for arg in self._dynamic_config._initial_arg_list:
7314 if not isinstance(arg, SetArg):
7316 if arg.root_config.root != root_config.root:
7322 if k in ("selected", "world") or \
7323 not root_config.sets[k].world_candidate:
7328 all_added.append(SETPREFIX + k)
7329 all_added.extend(added_favorites)
7333 if "--ask" in self._frozen_config.myopts:
7334 writemsg_stdout("\n", noiselevel=-1)
7336 writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
7338 writemsg_stdout("\n", noiselevel=-1)
7339 prompt = "Would you like to add these packages to your world " \
7341 enter_invalid = '--ask-enter-invalid' in \
7342 self._frozen_config.myopts
7343 if userquery(prompt, enter_invalid) == "No":
7348 if a.startswith(SETPREFIX):
7349 filename = "world_sets"
7353 ">>> Recording %s in \"%s\" favorites file...\n" %
7354 (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
7355 world_set.update(all_added)
7360 def _loadResumeCommand(self, resume_data, skip_masked=True,
7363 Add a resume command to the graph and validate it in the process. This
7364 will raise a PackageNotFound exception if a package is not available.
7369 if not isinstance(resume_data, dict):
7372 mergelist = resume_data.get("mergelist")
7373 if not isinstance(mergelist, list):
7376 favorites = resume_data.get("favorites")
7377 if isinstance(favorites, list):
7378 args = self._load_favorites(favorites)
7382 fakedb = self._dynamic_config.mydbapi
7383 serialized_tasks = []
7386 if not (isinstance(x, list) and len(x) == 4):
7388 pkg_type, myroot, pkg_key, action = x
7389 if pkg_type not in self.pkg_tree_map:
7391 if action != "merge":
7393 root_config = self._frozen_config.roots[myroot]
7395 # Use the resume "favorites" list to see if a repo was specified
7397 depgraph_sets = self._dynamic_config.sets[root_config.root]
7399 for atom in depgraph_sets.atoms.getAtoms():
7400 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
7404 atom = "=" + pkg_key
7406 atom = atom + _repo_separator + repo
7409 atom = Atom(atom, allow_repo=True)
7414 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
7415 if not self._pkg_visibility_check(pkg) or \
7416 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
7417 modified_use=self._pkg_use_enabled(pkg)):
7422 # It does no exist or it is corrupt.
7424 # TODO: log these somewhere
7426 raise portage.exception.PackageNotFound(pkg_key)
7428 if "merge" == pkg.operation and \
7429 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
7430 modified_use=self._pkg_use_enabled(pkg)):
7433 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
7435 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
7437 self._dynamic_config._unsatisfied_deps_for_display.append(
7438 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
7440 fakedb[myroot].cpv_inject(pkg)
7441 serialized_tasks.append(pkg)
7442 self._spinner_update()
7444 if self._dynamic_config._unsatisfied_deps_for_display:
7447 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
7448 self._dynamic_config._serialized_tasks_cache = serialized_tasks
7449 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
7451 self._select_package = self._select_pkg_from_graph
7452 self._dynamic_config.myparams["selective"] = True
7453 # Always traverse deep dependencies in order to account for
7454 # potentially unsatisfied dependencies of installed packages.
7455 # This is necessary for correct --keep-going or --resume operation
7456 # in case a package from a group of circularly dependent packages
7457 # fails. In this case, a package which has recently been installed
7458 # may have an unsatisfied circular dependency (pulled in by
7459 # PDEPEND, for example). So, even though a package is already
7460 # installed, it may not have all of it's dependencies satisfied, so
7461 # it may not be usable. If such a package is in the subgraph of
7462 # deep depenedencies of a scheduled build, that build needs to
7463 # be cancelled. In order for this type of situation to be
7464 # recognized, deep traversal of dependencies is required.
7465 self._dynamic_config.myparams["deep"] = True
7467 for task in serialized_tasks:
7468 if isinstance(task, Package) and \
7469 task.operation == "merge":
7470 if not self._add_pkg(task, None):
7473 # Packages for argument atoms need to be explicitly
7474 # added via _add_pkg() so that they are included in the
7475 # digraph (needed at least for --tree display).
7476 for arg in self._expand_set_args(args, add_to_digraph=True):
7477 for atom in arg.pset.getAtoms():
7478 pkg, existing_node = self._select_package(
7479 arg.root_config.root, atom)
7480 if existing_node is None and \
7482 if not self._add_pkg(pkg, Dependency(atom=atom,
7483 root=pkg.root, parent=arg)):
7486 # Allow unsatisfied deps here to avoid showing a masking
7487 # message for an unsatisfied dep that isn't necessarily
7489 if not self._create_graph(allow_unsatisfied=True):
7492 unsatisfied_deps = []
7493 for dep in self._dynamic_config._unsatisfied_deps:
7494 if not isinstance(dep.parent, Package):
7496 if dep.parent.operation == "merge":
7497 unsatisfied_deps.append(dep)
7500 # For unsatisfied deps of installed packages, only account for
7501 # them if they are in the subgraph of dependencies of a package
7502 # which is scheduled to be installed.
7503 unsatisfied_install = False
7505 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
7507 node = dep_stack.pop()
7508 if not isinstance(node, Package):
7510 if node.operation == "merge":
7511 unsatisfied_install = True
7513 if node in traversed:
7516 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
7518 if unsatisfied_install:
7519 unsatisfied_deps.append(dep)
7521 if masked_tasks or unsatisfied_deps:
7522 # This probably means that a required package
7523 # was dropped via --skipfirst. It makes the
7524 # resume list invalid, so convert it to a
7525 # UnsatisfiedResumeDep exception.
7526 raise self.UnsatisfiedResumeDep(self,
7527 masked_tasks + unsatisfied_deps)
7528 self._dynamic_config._serialized_tasks_cache = None
7531 except self._unknown_internal_error:
7536 def _load_favorites(self, favorites):
7538 Use a list of favorites to resume state from a
7539 previous select_files() call. This creates similar
7540 DependencyArg instances to those that would have
7541 been created by the original select_files() call.
7542 This allows Package instances to be matched with
7543 DependencyArg instances during graph creation.
7545 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7546 sets = root_config.sets
7547 depgraph_sets = self._dynamic_config.sets[root_config.root]
7550 if not isinstance(x, basestring):
7552 if x in ("system", "world"):
7554 if x.startswith(SETPREFIX):
7555 s = x[len(SETPREFIX):]
7558 if s in depgraph_sets.sets:
7561 depgraph_sets.sets[s] = pset
7562 args.append(SetArg(arg=x, pset=pset,
7563 root_config=root_config))
7566 x = Atom(x, allow_repo=True)
7567 except portage.exception.InvalidAtom:
7569 args.append(AtomArg(arg=x, atom=x,
7570 root_config=root_config))
7572 self._set_args(args)
7575 class UnsatisfiedResumeDep(portage.exception.PortageException):
7577 A dependency of a resume list is not installed. This
7578 can occur when a required package is dropped from the
7579 merge list via --skipfirst.
def __init__(self, depgraph, value):
	# Keep a reference to the depgraph so callers catching this
	# exception can inspect the state that produced the unsatisfied
	# resume dependencies; the value goes to the base exception.
	self.depgraph = depgraph
	portage.exception.PortageException.__init__(self, value)
# Base class for exceptions used internally to abort/unwind depgraph
# construction; the more specific subclasses below refine the reason.
class _internal_exception(portage.exception.PortageException):
	# Explicit base-class __init__ call matches the py2/py3-compatible
	# style used by the other exception classes in this file.
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
7589 class _unknown_internal_error(_internal_exception):
7591 Used by the depgraph internally to terminate graph creation.
7592 The specific reason for the failure should have been dumped
7593 to stderr, unfortunately, the exact reason for the failure
7597 class _serialize_tasks_retry(_internal_exception):
7599 This is raised by the _serialize_tasks() method when it needs to
7600 be called again for some reason. The only case that it's currently
7601 used for is when neglected dependencies need to be added to the
7602 graph in order to avoid making a potentially unsafe decision.
7605 class _backtrack_mask(_internal_exception):
7607 This is raised by _show_unsatisfied_dep() when it's called with
7608 check_backtrack=True and a matching package has been masked by
7612 class _autounmask_breakage(_internal_exception):
7614 This is raised by _show_unsatisfied_dep() when it's called with
7615 check_autounmask_breakage=True and a matching package has been
7616 been disqualified due to autounmask changes.
def need_restart(self):
	"""Return True when a restart has been requested and is not suppressed."""
	dynamic_config = self._dynamic_config
	if dynamic_config._skip_restart:
		return False
	return dynamic_config._need_restart
def success_without_autounmask(self):
	"""Return the dynamic config's success-without-autounmask flag."""
	dynamic_config = self._dynamic_config
	return dynamic_config._success_without_autounmask
7626 def autounmask_breakage_detected(self):
7628 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7629 self._show_unsatisfied_dep(
7630 *pargs, check_autounmask_breakage=True,
7631 **portage._native_kwargs(kwargs))
7632 except self._autounmask_breakage:
def get_backtrack_infos(self):
	"""Expose the backtrack information collected in the dynamic config."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
7640 class _dep_check_composite_db(dbapi):
7642 A dbapi-like interface that is optimized for use in dep_check() calls.
7643 This is built on top of the existing depgraph package selection logic.
7644 Some packages that have been added to the graph may be masked from this
7645 view in order to influence the atom preference selection that occurs
7648 def __init__(self, depgraph, root):
7649 dbapi.__init__(self)
7650 self._depgraph = depgraph
7652 self._match_cache = {}
7653 self._cpv_pkg_map = {}
7655 def _clear_cache(self):
7656 self._match_cache.clear()
7657 self._cpv_pkg_map.clear()
7659 def cp_list(self, cp):
7661 Emulate cp_list just so it can be used to check for existence
7662 of new-style virtuals. Since it's a waste of time to return
7663 more than one cpv for this use case, a maximum of one cpv will
7666 if isinstance(cp, Atom):
7671 for pkg in self._depgraph._iter_match_pkgs_any(
7672 self._depgraph._frozen_config.roots[self._root], atom):
7679 def match(self, atom):
7680 cache_key = (atom, atom.unevaluated_atom)
7681 ret = self._match_cache.get(cache_key)
7686 pkg, existing = self._depgraph._select_package(self._root, atom)
7688 if pkg is not None and self._visible(pkg):
7689 self._cpv_pkg_map[pkg.cpv] = pkg
7692 if pkg is not None and \
7693 atom.slot is None and \
7694 pkg.cp.startswith("virtual/") and \
7695 (("remove" not in self._depgraph._dynamic_config.myparams and
7696 "--update" not in self._depgraph._frozen_config.myopts) or
7698 not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
7699 # For new-style virtual lookahead that occurs inside dep_check()
7700 # for bug #141118, examine all slots. This is needed so that newer
7701 # slots will not unnecessarily be pulled in when a satisfying lower
7702 # slot is already installed. For example, if virtual/jdk-1.5 is
7703 # satisfied via gcj-jdk then there's no need to pull in a newer
7704 # slot to satisfy a virtual/jdk dependency, unless --update is
7708 for virt_pkg in self._depgraph._iter_match_pkgs_any(
7709 self._depgraph._frozen_config.roots[self._root], atom):
7710 if virt_pkg.cp != pkg.cp:
7712 slots.add(virt_pkg.slot)
7714 slots.remove(pkg.slot)
7716 slot_atom = atom.with_slot(slots.pop())
7717 pkg, existing = self._depgraph._select_package(
7718 self._root, slot_atom)
7721 if not self._visible(pkg):
7723 self._cpv_pkg_map[pkg.cpv] = pkg
7727 self._cpv_sort_ascending(ret)
7729 self._match_cache[cache_key] = ret
7732 def _visible(self, pkg):
7733 if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
7735 if pkg.installed and \
7736 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
7737 # Account for packages with masks (like KEYWORDS masks)
7738 # that are usually ignored in visibility checks for
7739 # installed packages, in order to handle cases like
7741 myopts = self._depgraph._frozen_config.myopts
7742 use_ebuild_visibility = myopts.get(
7743 '--use-ebuild-visibility', 'n') != 'n'
7744 avoid_update = "--update" not in myopts and \
7745 "remove" not in self._depgraph._dynamic_config.myparams
7746 usepkgonly = "--usepkgonly" in myopts
7747 if not avoid_update:
7748 if not use_ebuild_visibility and usepkgonly:
7750 elif not self._depgraph._equiv_ebuild_visible(pkg):
7753 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
7754 self._root].get(pkg.slot_atom)
7755 if in_graph is None:
7756 # Mask choices for packages which are not the highest visible
7757 # version within their slot (since they usually trigger slot
7759 highest_visible, in_graph = self._depgraph._select_package(
7760 self._root, pkg.slot_atom)
7761 # Note: highest_visible is not necessarily the real highest
7762 # visible, especially when --update is not enabled, so use
7763 # < operator instead of !=.
7764 if highest_visible is not None and pkg < highest_visible:
7766 elif in_graph != pkg:
7767 # Mask choices for packages that would trigger a slot
7768 # conflict with a previously selected package.
def aux_get(self, cpv, wants):
	"""Return the requested metadata values for cpv, "" for absent keys."""
	metadata = self._cpv_pkg_map[cpv]._metadata
	values = []
	for key in wants:
		values.append(metadata.get(key, ""))
	return values
def match_pkgs(self, atom):
	"""Map the cpvs returned by self.match(atom) to their Package objects."""
	pkg_map = self._cpv_pkg_map
	return [pkg_map[cpv] for cpv in self.match(atom)]
7779 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
7781 if "--quiet" in myopts:
7782 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7783 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
7784 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7785 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
7788 s = search(root_config, spinner, "--searchdesc" in myopts,
7789 "--quiet" not in myopts, "--usepkg" in myopts,
7790 "--usepkgonly" in myopts)
7791 null_cp = portage.dep_getkey(insert_category_into_atom(
7793 cat, atom_pn = portage.catsplit(null_cp)
7794 s.searchkey = atom_pn
7795 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7798 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7799 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
7801 def _spinner_start(spinner, myopts):
7804 if "--quiet" not in myopts and \
7805 ("--pretend" in myopts or "--ask" in myopts or \
7806 "--tree" in myopts or "--verbose" in myopts):
7808 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7810 elif "--buildpkgonly" in myopts:
7814 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
7815 if "--unordered-display" in myopts:
7816 portage.writemsg_stdout("\n" + \
7817 darkgreen("These are the packages that " + \
7818 "would be %s:" % action) + "\n\n")
7820 portage.writemsg_stdout("\n" + \
7821 darkgreen("These are the packages that " + \
7822 "would be %s, in reverse order:" % action) + "\n\n")
7824 portage.writemsg_stdout("\n" + \
7825 darkgreen("These are the packages that " + \
7826 "would be %s, in order:" % action) + "\n\n")
7828 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
7829 if not show_spinner:
7830 spinner.update = spinner.update_quiet
7833 portage.writemsg_stdout("Calculating dependencies ")
7835 def _spinner_stop(spinner):
7836 if spinner is None or \
7837 spinner.update == spinner.update_quiet:
7840 if spinner.update != spinner.update_basic:
7841 # update_basic is used for non-tty output,
7842 # so don't output backspaces in that case.
7843 portage.writemsg_stdout("\b\b")
7845 portage.writemsg_stdout("... done!\n")
7847 def backtrack_depgraph(settings, trees, myopts, myparams,
7848 myaction, myfiles, spinner):
7850 Raises PackageSetNotFound if myfiles contains a missing package set.
7852 _spinner_start(spinner, myopts)
7854 return _backtrack_depgraph(settings, trees, myopts, myparams,
7855 myaction, myfiles, spinner)
7857 _spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
	"""
	Iteratively build a dependency graph, feeding failure information
	back into a Backtracker until a successful graph is found, the retry
	budget is exhausted, or no further backtracking is possible.

	@return: (success, depgraph, favorites)
	"""
	debug = "--debug" in myopts
	mydepgraph = None
	max_retries = myopts.get('--backtrack', 10)
	# Floor division: max_depth must stay an int under Python 3.
	max_depth = max(1, (max_retries + 1) // 2)
	allow_backtracking = max_retries > 0
	backtracker = Backtracker(max_depth)
	backtracked = 0

	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)

	while backtracker:

		if debug and mydepgraph is not None:
			writemsg_level(
				"\n\nbacktracking try %s \n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		backtrack_parameters = backtracker.get()

		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=allow_backtracking,
			backtrack_parameters=backtrack_parameters)
		success, favorites = mydepgraph.select_files(myfiles)

		if success or mydepgraph.success_without_autounmask():
			break
		elif not allow_backtracking:
			break
		elif backtracked >= max_retries:
			break
		elif mydepgraph.need_restart():
			backtracked += 1
			backtracker.feedback(mydepgraph.get_backtrack_infos())
		else:
			break

	if not (success or mydepgraph.success_without_autounmask()) and backtracked:

		if debug:
			writemsg_level(
				"\n\nbacktracking aborted after %s tries\n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		# Fall back to the best run seen so far, with backtracking disabled.
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=False,
			backtrack_parameters=backtracker.get_best_run())
		success, favorites = mydepgraph.select_files(myfiles)

	if not success and mydepgraph.autounmask_breakage_detected():
		if debug:
			writemsg_level(
				"\n\nautounmask breakage detected\n\n",
				noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()
		# Autounmask changes caused breakage; retry with autounmask disabled.
		myopts["--autounmask"] = "n"
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config, allow_backtracking=False)
		success, favorites = mydepgraph.select_files(myfiles)

	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the resume list, with spinner handling.

	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	_spinner_start(spinner, myopts)
	try:
		return _resume_depgraph(settings, trees, mtimedb, myopts,
			myparams, spinner)
	finally:
		# Always stop the spinner, even when _resume_depgraph raises.
		_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.
	@rtype: tuple
	@return: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = {}
	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner, frozen_config=frozen_config)
		try:
			success = mydepgraph._loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep as e:
			if not skip_unsatisfied:
				raise

			graph = mydepgraph._dynamic_config.digraph
			unsatisfied_parents = {}
			traversed_nodes = set()
			unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
			while unsatisfied_stack:
				pkg, atom = unsatisfied_stack.pop()
				if atom is not None and \
					mydepgraph._select_pkg_from_installed(
					pkg.root, atom)[0] is not None:
					continue
				atoms = unsatisfied_parents.get(pkg)
				if atoms is None:
					atoms = []
					unsatisfied_parents[pkg] = atoms
				if atom is not None:
					atoms.append(atom)
				if pkg in traversed_nodes:
					continue
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node, atom in \
					mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
						continue
					# We need to traverse all priorities here, in order to
					# ensure that a package with an unsatisfied dependency
					# won't get pulled in, even indirectly via a soft
					# dependency.
					unsatisfied_stack.append((parent_node, atom))

			unsatisfied_tuples = frozenset(tuple(parent_node)
				for parent_node in unsatisfied_parents
				if isinstance(parent_node, Package))
			pruned_mergelist = []
			for x in mergelist:
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_tuples:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update((pkg, atoms) for pkg, atoms in \
				unsatisfied_parents.items() if pkg.operation != "nomerge")

			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
	"""
	Fetch metadata for cpv and compute the reasons it is masked.

	@return: (metadata, mreasons) where metadata is None (and mreasons
		is ["corruption"]) when the metadata could not be fetched.
	"""
	try:
		metadata = dict(zip(db_keys,
			db.aux_get(cpv, db_keys, myrepo=myrepo)))
	except KeyError:
		# aux_get raises KeyError when the package is missing or corrupt.
		metadata = None

	if metadata is None:
		mreasons = ["corruption"]
	else:
		eapi = metadata['EAPI']
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
		else:
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)

			modified_use = None
			if _pkg_use_enabled is not None:
				modified_use = _pkg_use_enabled(pkg)

			mreasons = get_masking_status(pkg, pkgsettings, root_config,
				myrepo=myrepo, use=modified_use)

	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""
	Display each masked package once, with its mask reasons, any
	package.mask comment, and the locations of missing licenses.

	@param masked_packages: list of (root_config, pkgsettings, cpv,
		repo, metadata, mreasons) tuples
	@return: True if any package was masked due to an unsupported EAPI
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv, repo,
		metadata, mreasons) in masked_packages:
		output_cpv = cpv
		if repo:
			output_cpv += _repo_separator + repo
		if output_cpv in shown_cpvs:
			continue
		shown_cpvs.add(output_cpv)
		eapi_masked = metadata is not None and \
			not portage.eapi_is_supported(metadata["EAPI"])
		if eapi_masked:
			have_eapi_mask = True
			# When masked by EAPI, metadata is mostly useless since
			# it doesn't contain essential things like SLOT.
			metadata = None
		comment, filename = None, None
		if not eapi_masked and \
			"package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not eapi_masked and metadata is not None:
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
			noiselevel=-1)

		if comment and comment not in shown_comments:
			writemsg(filename + ":\n" + comment + "\n",
				noiselevel=-1)
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			if l in shown_licenses:
				continue
			l_path = portdb.findLicensePath(l)
			if l_path is None:
				continue
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation about masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES "
		"section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for doc_line in doc_lines:
		writemsg(doc_line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a pointer to the handbook section about blocked packages."""
	intro = ("\nFor more information about " + bad("Blocked Packages") +
		", please refer to the following\n")
	for line in (intro,
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n"):
		writemsg(line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg."""
	reasons = _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use)
	return [reason.message for reason in reasons]
8130 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
8131 mreasons = _getmaskingstatus(
8132 pkg, settings=pkgsettings,
8133 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
8135 if not pkg.installed:
8136 if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
8137 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
8138 pkg._metadata["CHOST"]))
8141 for msgs in pkg.invalid.values():
8144 _MaskReason("invalid", "invalid: %s" % (msg,)))
8146 if not pkg._metadata["SLOT"]:
8148 _MaskReason("invalid", "SLOT: undefined"))