1 # Copyright 1999-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
13 from collections import deque
14 from itertools import chain
17 from portage import os, OrderedDict
18 from portage import _unicode_decode, _unicode_encode, _encodings
19 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
20 from portage.dbapi import dbapi
21 from portage.dbapi.dep_expand import dep_expand
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.dep._slot_abi import ignore_built_slot_operator_deps
26 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
27 from portage.exception import (InvalidAtom, InvalidDependString,
28 PackageNotFound, PortageException)
29 from portage.output import colorize, create_color_func, \
31 bad = create_color_func("BAD")
32 from portage.package.ebuild.getmaskingstatus import \
33 _getmaskingstatus, _MaskReason
34 from portage._sets import SETPREFIX
35 from portage._sets.base import InternalPackageSet
36 from portage.util import ConfigProtect, shlex_split, new_protect_filename
37 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
38 from portage.util import ensure_dirs
39 from portage.util import writemsg_level, write_atomic
40 from portage.util.digraph import digraph
41 from portage.util.listdir import _ignorecvs_dirs
42 from portage.versions import catpkgsplit
44 from _emerge.AtomArg import AtomArg
45 from _emerge.Blocker import Blocker
46 from _emerge.BlockerCache import BlockerCache
47 from _emerge.BlockerDepPriority import BlockerDepPriority
48 from _emerge.countdown import countdown
49 from _emerge.create_world_atom import create_world_atom
50 from _emerge.Dependency import Dependency
51 from _emerge.DependencyArg import DependencyArg
52 from _emerge.DepPriority import DepPriority
53 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
54 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
55 from _emerge.FakeVartree import FakeVartree
56 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
57 from _emerge.is_valid_package_atom import insert_category_into_atom, \
59 from _emerge.Package import Package
60 from _emerge.PackageArg import PackageArg
61 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
62 from _emerge.RootConfig import RootConfig
63 from _emerge.search import search
64 from _emerge.SetArg import SetArg
65 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
66 from _emerge.UnmergeDepPriority import UnmergeDepPriority
67 from _emerge.UseFlagDisplay import pkg_use_display
68 from _emerge.userquery import userquery
70 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
71 from _emerge.resolver.slot_collision import slot_conflict_handler
72 from _emerge.resolver.circular_dependency import circular_dependency_handler
73 from _emerge.resolver.output import Display
75 if sys.hexversion >= 0x3000000:
# Plain data holder handed to the Scheduler: the trees, the shared Package
# cache, the dependency graph, and the ordered merge list.
# NOTE(review): this excerpt elides original lines 84 and 86, so the
# assignments for the `trees` and `graph` parameters are not visible here.
82 class _scheduler_graph_config(object):
83 def __init__(self, trees, pkg_cache, graph, mergelist):
85 self.pkg_cache = pkg_cache
87 self.mergelist = mergelist
# Build an InternalPackageSet from a list of atom strings, allowing
# wildcard atoms. An atom string that fails to parse is retried with a
# leading "*/" category wildcard (the InvalidAtom fallback below).
# NOTE(review): original lines 91-92 (the loop over `atoms` and the `try:`
# opener) plus the trailing add/return lines are elided from this excerpt.
89 def _wildcard_set(atoms):
90 pkgs = InternalPackageSet(allow_wildcard=True)
93 x = Atom(x, allow_wildcard=True, allow_repo=False)
94 except portage.exception.InvalidAtom:
95 x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
# State that is fixed for the lifetime of a depgraph and can be shared
# across backtracking runs: settings, spinner, FakeVartree-wrapped trees,
# per-root pkgsettings clones, and the wildcard atom sets parsed from the
# various --exclude / --*-atoms / --rebuild-* command-line options.
# NOTE(review): the embedded numbering skips several lines (e.g. 104-105,
# 107, 111, 113, 115, 120), so some attribute initializations and the
# `for myroot in trees:` loop header are not visible in this excerpt.
99 class _frozen_depgraph_config(object):
101 def __init__(self, settings, trees, myopts, spinner):
102 self.settings = settings
103 self.target_root = settings["EROOT"]
106 if settings.get("PORTAGE_DEBUG", "") == "1":
108 self.spinner = spinner
109 self._running_root = trees[trees._running_eroot]["root_config"]
110 self.pkgsettings = {}
112 self._trees_orig = trees
114 # All Package instances
116 self._highest_license_masked = {}
117 dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
118 ignore_built_slot_operator_deps = myopts.get(
119 "--ignore-built-slot-operator-deps", "n") == "y"
# Per-root setup: substitute a FakeVartree for the real vartree so the
# resolver can model vdb state without touching the live system.
121 self.trees[myroot] = {}
122 # Create a RootConfig instance that references
123 # the FakeVartree instead of the real one.
124 self.roots[myroot] = RootConfig(
125 trees[myroot]["vartree"].settings,
127 trees[myroot]["root_config"].setconfig)
128 for tree in ("porttree", "bintree"):
129 self.trees[myroot][tree] = trees[myroot][tree]
130 self.trees[myroot]["vartree"] = \
131 FakeVartree(trees[myroot]["root_config"],
132 pkg_cache=self._pkg_cache,
133 pkg_root_config=self.roots[myroot],
134 dynamic_deps=dynamic_deps,
135 ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
136 self.pkgsettings[myroot] = portage.config(
137 clone=self.trees[myroot]["vartree"].settings)
139 self._required_set_names = set(["world"])
# Each option value is a list of strings; join-then-split normalizes
# whitespace before handing the tokens to _wildcard_set().
141 atoms = ' '.join(myopts.get("--exclude", [])).split()
142 self.excluded_pkgs = _wildcard_set(atoms)
143 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
144 self.reinstall_atoms = _wildcard_set(atoms)
145 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
146 self.usepkg_exclude = _wildcard_set(atoms)
147 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
148 self.useoldpkg_atoms = _wildcard_set(atoms)
149 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
150 self.rebuild_exclude = _wildcard_set(atoms)
151 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
152 self.rebuild_ignore = _wildcard_set(atoms)
# Boolean --rebuild-if-* flags, consumed later by _rebuild_config.
154 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
155 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
156 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
# Per-root bookkeeping for package sets and argument atoms pulled into
# the graph.
# NOTE(review): the `def __init__(self):` line (~159) and the
# `self.sets = {}` initialization (~161) are elided from this excerpt.
158 class _depgraph_sets(object):
160 # contains all sets added to the graph
162 # contains non-set atoms given as arguments
163 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
164 # contains all atoms from all sets added to the graph, including
165 # atoms given as arguments
166 self.atoms = InternalPackageSet(allow_repo=True)
167 self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks build-time dependency edges and decides which packages must
	be rebuilt or reinstalled in support of the --rebuild-if-* options.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		"""
		@param frozen_config: shared _frozen_depgraph_config holding the
			--rebuild-if-* flags and exclude/ignore atom sets
		@param backtrack_parameters: rebuild/reinstall state accumulated
			by previous backtracking runs
		"""
		self._frozen_config = frozen_config
		self._graph = digraph()
		# Copy so that mutations here never leak back into the
		# backtrack_parameters object.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		# Mirror the relevant option flags from the frozen config.
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any --rebuild-if-* mode is active at all.
		self.rebuild = any((self.rebuild_if_new_rev,
			self.rebuild_if_new_ver, self.rebuild_if_unbuilt))
182 def add(self, dep_pkg, dep):
183 parent = dep.collapsed_parent
184 priority = dep.collapsed_priority
185 rebuild_exclude = self._frozen_config.rebuild_exclude
186 rebuild_ignore = self._frozen_config.rebuild_ignore
187 if (self.rebuild and isinstance(parent, Package) and
188 parent.built and priority.buildtime and
189 isinstance(dep_pkg, Package) and
190 not rebuild_exclude.findAtomForPackage(parent) and
191 not rebuild_ignore.findAtomForPackage(dep_pkg)):
192 self._graph.add(dep_pkg, parent, priority)
# NOTE(review): original lines 198-199, 203-204, 211 and 219-222 are
# elided, so the early-return values and the final return of the loop
# below are not visible in this excerpt — presumably boolean returns;
# verify against the full source.
194 def _needs_rebuild(self, dep_pkg):
195 """Check whether packages that depend on dep_pkg need to be rebuilt."""
196 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
197 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
200 if self.rebuild_if_unbuilt:
201 # dep_pkg is being installed from source, so binary
202 # packages for parents are invalid. Force rebuild
205 trees = self._frozen_config.trees
206 vardb = trees[dep_pkg.root]["vartree"].dbapi
207 if self.rebuild_if_new_rev:
208 # Parent packages are valid if a package with the same
209 # cpv is already installed.
210 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
212 # Otherwise, parent packages are valid if a package with the same
213 # version (excluding revision) is already installed.
# Invariant: if neither of the earlier flags applied, --rebuild-if-new-ver
# must be the active mode (the only remaining way self.rebuild is true).
214 assert self.rebuild_if_new_ver
# catpkgsplit yields (cat, pkg, ver, rev); dropping the last element
# compares versions with the revision excluded.
215 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
216 for inst_cpv in vardb.match(dep_pkg.slot_atom):
217 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
218 if inst_cpv_norev == cpv_norev:
# Decide whether `parent` must be rebuilt (added to self.rebuild_list) or
# reinstalled (added to self.reinstall_list) based on the state of its
# build-time dependencies.
# NOTE(review): several lines are elided (226, 228, 233, 238, 242, 245,
# 251, 261-262, 267, 271, 273-275, 279-280, 282-283), including the loop
# `continue`/`break` statements and this method's return value — verify
# the control flow against the full source.
223 def _trigger_rebuild(self, parent, build_deps):
224 root_slot = (parent.root, parent.slot_atom)
225 if root_slot in self.rebuild_list:
227 trees = self._frozen_config.trees
229 for slot_atom, dep_pkg in build_deps.items():
230 dep_root_slot = (dep_pkg.root, slot_atom)
231 if self._needs_rebuild(dep_pkg):
232 self.rebuild_list.add(root_slot)
234 elif ("--usepkg" in self._frozen_config.myopts and
235 (dep_root_slot in self.reinstall_list or
236 dep_root_slot in self.rebuild_list or
237 not dep_pkg.installed)):
239 # A direct rebuild dependency is being installed. We
240 # should update the parent as well to the latest binary,
241 # if that binary is valid.
243 # To validate the binary, we check whether all of the
244 # rebuild dependencies are present on the same binhost.
246 # 1) If parent is present on the binhost, but one of its
247 # rebuild dependencies is not, then the parent should
248 # be rebuilt from source.
249 # 2) Otherwise, the parent binary is assumed to be valid,
250 # because all of its rebuild dependencies are
252 bintree = trees[parent.root]["bintree"]
253 uri = bintree.get_pkgindex_uri(parent.cpv)
254 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
255 bindb = bintree.dbapi
256 if self.rebuild_if_new_ver and uri and uri != dep_uri:
# With --rebuild-if-new-ver, any same-version (revision ignored) binary
# of dep_pkg on the same binhost as the parent is good enough.
257 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
258 for cpv in bindb.match(dep_pkg.slot_atom):
259 if cpv_norev == catpkgsplit(cpv)[:-1]:
260 dep_uri = bintree.get_pkgindex_uri(cpv)
263 if uri and uri != dep_uri:
264 # 1) Remote binary package is invalid because it was
265 # built without dep_pkg. Force rebuild.
266 self.rebuild_list.add(root_slot)
268 elif (parent.installed and
269 root_slot not in self.reinstall_list):
270 inst_build_time = parent.metadata.get("BUILD_TIME")
# Trailing comma unpacks the single-element aux_get result.
272 bin_build_time, = bindb.aux_get(parent.cpv,
276 if bin_build_time != inst_build_time:
277 # 2) Remote binary package is valid, and local package
278 # is not up to date. Force reinstall.
281 self.reinstall_list.add(root_slot)
# NOTE(review): lines 285, 288-293, 295, 298-299, 302, 306, 308, 311, 314,
# 316, 321, 327+ are elided — the docstring close, the graph/build_deps
# setup, the outer `while` loop header, and the post-trigger handling are
# not visible in this excerpt.
284 def trigger_rebuilds(self):
286 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
287 depends on pkgA at both build-time and run-time, pkgB needs to be
# Process nodes bottom-up with a deque of current leaves; deque gives
# O(1) popleft for this worklist.
294 leaf_nodes = deque(graph.leaf_nodes())
296 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
297 # will always know which children are being rebuilt.
300 # We'll have to drop an edge. This should be quite rare.
301 leaf_nodes.append(graph.order[-1])
303 node = leaf_nodes.popleft()
304 if node not in graph:
305 # This can be triggered by circular dependencies.
307 slot_atom = node.slot_atom
309 # Remove our leaf node from the graph, keeping track of deps.
310 parents = graph.parent_nodes(node)
312 node_build_deps = build_deps.get(node, {})
313 for parent in parents:
315 # Ignore a direct cycle.
# Propagate this node upward: each parent learns that its build dep in
# slot_atom resolves to `node`, and becomes a leaf once childless.
317 parent_bdeps = build_deps.setdefault(parent, {})
318 parent_bdeps[slot_atom] = node
319 if not graph.child_nodes(parent):
320 leaf_nodes.append(parent)
322 # Trigger rebuilds for our leaf node. Because all of our children
323 # have been processed, the build_deps will be completely filled in,
324 # and self.rebuild_list / self.reinstall_list will tell us whether
325 # any of our children need to be rebuilt or reinstalled.
326 if self._trigger_rebuild(node, node_build_deps):
# Mutable, per-backtracking-run resolver state. A fresh instance is built
# for every depgraph; the backtrack_parameters carry forward the decisions
# (keyword/mask/license/USE changes, runtime masks) from earlier runs.
# NOTE(review): the embedded numbering skips many lines throughout this
# constructor (e.g. 333, 342, 347, 356, 398, 404, 409, 421, 427, 435,
# 437-438, 449-450, 454, 464, 475-476, 484, 489, 494, 500), so several
# attribute initializations — including the creation of `graph_tree`,
# `filtered_tree`, `self.sets`, `self.mydbapi` and the `dbs` list — are
# not visible in this excerpt.
332 class _dynamic_depgraph_config(object):
334 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
335 self.myparams = myparams.copy()
336 self._vdb_loaded = False
337 self._allow_backtracking = allow_backtracking
338 # Maps slot atom to package for each Package added to the graph.
339 self._slot_pkg_map = {}
340 # Maps nodes to the reasons they were selected for reinstallation.
341 self._reinstall_nodes = {}
343 # Contains a filtered view of preferred packages that are selected
344 # from available repositories.
345 self._filtered_trees = {}
346 # Contains installed packages and new packages that have been added
348 self._graph_trees = {}
349 # Caches visible packages returned from _select_package, for use in
350 # depgraph._iter_atoms_for_pkg() SLOT logic.
351 self._visible_pkgs = {}
352 #contains the args created by select_files
353 self._initial_arg_list = []
354 self.digraph = portage.digraph()
355 # manages sets added to the graph
357 # contains all nodes pulled in by self.sets
358 self._set_nodes = set()
359 # Contains only Blocker -> Uninstall edges
360 self._blocker_uninstalls = digraph()
361 # Contains only Package -> Blocker edges
362 self._blocker_parents = digraph()
363 # Contains only irrelevant Package -> Blocker edges
364 self._irrelevant_blockers = digraph()
365 # Contains only unsolvable Package -> Blocker edges
366 self._unsolvable_blockers = digraph()
367 # Contains all Blocker -> Blocked Package edges
368 self._blocked_pkgs = digraph()
369 # Contains world packages that have been protected from
370 # uninstallation but may not have been added to the graph
371 # if the graph is not complete yet.
372 self._blocked_world_pkgs = {}
373 # Contains packages whose dependencies have been traversed.
374 # This use used to check if we have accounted for blockers
375 # relevant to a package.
376 self._traversed_pkg_deps = set()
377 # This should be ordered such that the backtracker will
378 # attempt to solve conflicts which occurred earlier first,
379 # since an earlier conflict can be the cause of a conflict
380 # which occurs later.
381 self._slot_collision_info = OrderedDict()
382 # Slot collision nodes are not allowed to block other packages since
383 # blocker validation is only able to account for one package per slot.
384 self._slot_collision_nodes = set()
385 self._parent_atoms = {}
386 self._slot_conflict_handler = None
387 self._circular_dependency_handler = None
388 self._serialized_tasks_cache = None
389 self._scheduler_graph = None
390 self._displayed_list = None
391 self._pprovided_args = []
392 self._missing_args = []
393 self._masked_installed = set()
394 self._masked_license_updates = set()
395 self._unsatisfied_deps_for_display = []
396 self._unsatisfied_blockers_for_display = None
397 self._circular_deps_for_display = None
399 self._dep_disjunctive_stack = []
400 self._unsatisfied_deps = []
401 self._initially_unsatisfied_deps = []
402 self._ignored_deps = []
403 self._highest_pkg_cache = {}
405 # Binary packages that have been rejected because their USE
406 # didn't match the user's config. It maps packages to a set
407 # of flags causing the rejection.
408 self.ignored_binaries = {}
# Backtracking state carried over from previous runs.
410 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
411 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
412 self._needed_license_changes = backtrack_parameters.needed_license_changes
413 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
414 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
415 self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
416 self._need_restart = False
417 # For conditions that always require user intervention, such as
418 # unsatisfied REQUIRED_USE (currently has no autounmask support).
419 self._skip_restart = False
420 self._backtrack_infos = {}
422 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
423 self._success_without_autounmask = False
424 self._traverse_ignored_deps = False
425 self._complete_mode = False
426 self._slot_operator_deps = {}
# Per-root setup of the fake vdb and the graph/filtered tree views used
# by dep_check().
428 for myroot in depgraph._frozen_config.trees:
429 self.sets[myroot] = _depgraph_sets()
430 self._slot_pkg_map[myroot] = {}
431 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
432 # This dbapi instance will model the state that the vdb will
433 # have after new packages have been installed.
434 fakedb = PackageVirtualDbapi(vardb.settings)
436 self.mydbapi[myroot] = fakedb
439 graph_tree.dbapi = fakedb
440 self._graph_trees[myroot] = {}
441 self._filtered_trees[myroot] = {}
442 # Substitute the graph tree for the vartree in dep_check() since we
443 # want atom selections to be consistent with package selections
444 # have already been made.
445 self._graph_trees[myroot]["porttree"] = graph_tree
446 self._graph_trees[myroot]["vartree"] = graph_tree
447 self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
448 self._graph_trees[myroot]["graph"] = self.digraph
451 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
452 self._filtered_trees[myroot]["porttree"] = filtered_tree
453 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
455 # Passing in graph_tree as the vartree here could lead to better
456 # atom selections in some cases by causing atoms for packages that
457 # have been added to the graph to be preferred over other choices.
458 # However, it can trigger atom selections that result in
459 # unresolvable direct circular dependencies. For example, this
460 # happens with gwydion-dylan which depends on either itself or
461 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
462 # gwydion-dylan-bin needs to be selected in order to avoid a
463 # an unresolvable direct circular dependency.
465 # To solve the problem described above, pass in "graph_db" so that
466 # packages that have been added to the graph are distinguishable
467 # from other available packages and installed packages. Also, pass
468 # the parent package into self._select_atoms() calls so that
469 # unresolvable direct circular dependencies can be detected and
470 # avoided when possible.
471 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
472 self._filtered_trees[myroot]["graph"] = self.digraph
473 self._filtered_trees[myroot]["vartree"] = \
474 depgraph._frozen_config.trees[myroot]["vartree"]
477 # (db, pkg_type, built, installed, db_keys)
478 if "remove" in self.myparams:
479 # For removal operations, use _dep_check_composite_db
480 # for availability and visibility checks. This provides
481 # consistency with install operations, so we don't
482 # get install/uninstall cycles like in bug #332719.
483 self._graph_trees[myroot]["porttree"] = filtered_tree
485 if "--usepkgonly" not in depgraph._frozen_config.myopts:
486 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
487 db_keys = list(portdb._aux_cache_keys)
488 dbs.append((portdb, "ebuild", False, False, db_keys))
490 if "--usepkg" in depgraph._frozen_config.myopts:
491 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
492 db_keys = list(bindb._aux_cache_keys)
493 dbs.append((bindb, "binary", True, False, db_keys))
495 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
496 db_keys = list(depgraph._frozen_config._trees_orig[myroot
497 ]["vartree"].dbapi._aux_cache_keys)
498 dbs.append((vardb, "installed", True, True, db_keys))
499 self._filtered_trees[myroot]["dbs"] = dbs
# The dependency resolver itself.
# NOTE(review): lines 502, 504 and 506 are elided; the visible _dep_keys
# list may be incomplete relative to the full source.
501 class depgraph(object):
503 pkg_tree_map = RootConfig.pkg_tree_map
505 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# NOTE(review): BacktrackParameter() as a default argument is evaluated
# once at class definition and shared by every depgraph constructed
# without an explicit value — safe only if it is never mutated; verify.
# Lines 511 (the _frozen_depgraph_config call continuation) and 516 are
# elided from this excerpt.
507 def __init__(self, settings, trees, myopts, myparams, spinner,
508 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
# Reuse the caller's frozen config when backtracking; only build a new
# one on the first run.
509 if frozen_config is None:
510 frozen_config = _frozen_depgraph_config(settings, trees,
512 self._frozen_config = frozen_config
513 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
514 allow_backtracking, backtrack_parameters)
515 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
517 self._select_atoms = self._select_atoms_highest_available
518 self._select_package = self._select_pkg_highest_available
# NOTE(review): this is the interior of _load_vdb(); its `def` line (~521)
# and several body lines (526-527, 529-530, 532, 537, 540-541, 546-547,
# 552, 557-558, 560, 566, 568) are elided from this excerpt.
522 Load installed package metadata if appropriate. This used to be called
523 from the constructor, but that wasn't very nice since this procedure
524 is slow and it generates spinner output. So, now it's called on-demand
525 by various methods when necessary.
# Idempotent: bail out once the vdb has been loaded for this instance.
528 if self._dynamic_config._vdb_loaded:
531 for myroot in self._frozen_config.trees:
533 dynamic_deps = self._dynamic_config.myparams.get(
534 "dynamic_deps", "y") != "n"
535 preload_installed_pkgs = \
536 "--nodeps" not in self._frozen_config.myopts
538 if self._frozen_config.myopts.get("--root-deps") is not None and \
539 myroot != self._frozen_config.target_root:
542 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
543 if not fake_vartree.dbapi:
544 # This needs to be called for the first depgraph, but not for
545 # backtracking depgraphs that share the same frozen_config.
548 # FakeVartree.sync() populates virtuals, and we want
549 # self.pkgsettings to have them populated too.
550 self._frozen_config.pkgsettings[myroot] = \
551 portage.config(clone=fake_vartree.settings)
553 if preload_installed_pkgs:
554 vardb = fake_vartree.dbapi
555 fakedb = self._dynamic_config._graph_trees[
556 myroot]["vartree"].dbapi
559 self._spinner_update()
561 # This causes FakeVartree to update the
562 # Package instance dependencies via
563 # PackageVirtualDbapi.aux_update()
564 vardb.aux_get(pkg.cpv, [])
565 fakedb.cpv_inject(pkg)
567 self._dynamic_config._vdb_loaded = True
569 def _spinner_update(self):
570 if self._frozen_config.spinner:
571 self._frozen_config.spinner.update()
# NOTE(review): lines 574, 577, 582-583, 585, 588-591, 595-596, 603,
# 605-606, 608-609, 611, 614, 620-623, 627-630, 633 are elided — the
# docstring delimiters, the empty-match guard, the continue statements and
# the loop header over the NOTE lines are not visible in this excerpt.
573 def _show_ignored_binaries(self):
575 Show binaries that have been ignored because their USE didn't
576 match the user's config.
# Suppress the report when nothing was ignored, in --quiet mode, or when
# the user pinned binpkg_respect_use explicitly.
578 if not self._dynamic_config.ignored_binaries \
579 or '--quiet' in self._frozen_config.myopts \
580 or self._dynamic_config.myparams.get(
581 "binpkg_respect_use") in ("y", "n"):
# Iterate over a list copy because entries are popped below.
584 for pkg in list(self._dynamic_config.ignored_binaries):
586 selected_pkg = self._dynamic_config.mydbapi[pkg.root
587 ].match_pkgs(pkg.slot_atom)
592 selected_pkg = selected_pkg[-1]
593 if selected_pkg > pkg:
594 self._dynamic_config.ignored_binaries.pop(pkg)
597 if selected_pkg.installed and \
598 selected_pkg.cpv == pkg.cpv and \
599 selected_pkg.metadata.get('BUILD_TIME') == \
600 pkg.metadata.get('BUILD_TIME'):
601 # We don't care about ignored binaries when an
602 # identical installed instance is selected to
604 self._dynamic_config.ignored_binaries.pop(pkg)
607 if not self._dynamic_config.ignored_binaries:
610 self._show_merge_list()
612 writemsg("\n!!! The following binary packages have been ignored " + \
613 "due to non matching USE:\n\n", noiselevel=-1)
615 for pkg, flags in self._dynamic_config.ignored_binaries.items():
616 writemsg(" =%s" % pkg.cpv, noiselevel=-1)
617 if pkg.root_config.settings["ROOT"] != "/":
618 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
619 writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
624 "NOTE: The --binpkg-respect-use=n option will prevent emerge",
625 " from ignoring these binary packages if possible.",
626 " Using --binpkg-respect-use=y will silence this warning."
631 line = colorize("INFORM", line)
632 writemsg(line + "\n", noiselevel=-1)
# Report updates that were masked at runtime (e.g. by backtracking),
# collapsing to the highest missed update per (root, slot).
# NOTE(review): lines 635, 638, 641, 644, 648, 652-653, 655-656, 658-659,
# 661-662, 667, 672, 675, 678 are elided — the missed_updates dict
# initialization and several continue/guard lines are not visible here.
634 def _show_missed_update(self):
636 # In order to minimize noise, show only the highest
637 # missed update from each SLOT.
639 for pkg, mask_reasons in \
640 self._dynamic_config._runtime_pkg_mask.items():
642 # Exclude installed here since we only
643 # want to show available updates.
645 chosen_pkg = self._dynamic_config.mydbapi[pkg.root
646 ].match_pkgs(pkg.slot_atom)
647 if not chosen_pkg or chosen_pkg[-1] >= pkg:
649 k = (pkg.root, pkg.slot_atom)
650 if k in missed_updates:
651 other_pkg, mask_type, parent_atoms = missed_updates[k]
654 for mask_type, parent_atoms in mask_reasons.items():
657 missed_updates[k] = (pkg, mask_type, parent_atoms)
660 if not missed_updates:
# Bucket the misses by mask type so each category is displayed by its
# dedicated helper below.
663 missed_update_types = {}
664 for pkg, mask_type, parent_atoms in missed_updates.values():
665 missed_update_types.setdefault(mask_type,
666 []).append((pkg, parent_atoms))
668 if '--quiet' in self._frozen_config.myopts and \
669 '--debug' not in self._frozen_config.myopts:
670 missed_update_types.pop("slot conflict", None)
671 missed_update_types.pop("missing dependency", None)
673 self._show_missed_update_slot_conflicts(
674 missed_update_types.get("slot conflict"))
676 self._show_missed_update_unsatisfied_dep(
677 missed_update_types.get("missing dependency"))
# Display updates skipped because of unsatisfied dependencies; updates
# masked by backtracking are collected and shown in abbreviated form.
# NOTE(review): lines 680, 682-683, 686, 688-689, 696-697, 700, 705,
# 709-710, 721 are elided — including the `try:` opener matching the
# `except self._backtrack_mask:` below and the guard around the
# backtrack_masked section.
679 def _show_missed_update_unsatisfied_dep(self, missed_updates):
681 if not missed_updates:
684 self._show_merge_list()
685 backtrack_masked = []
687 for pkg, parent_atoms in missed_updates:
# First pass probes with check_backtrack=True; _backtrack_mask raising
# means the miss was caused by backtracking and is summarized later.
690 for parent, root, atom in parent_atoms:
691 self._show_unsatisfied_dep(root, atom, myparent=parent,
692 check_backtrack=True)
693 except self._backtrack_mask:
694 # This is displayed below in abbreviated form.
695 backtrack_masked.append((pkg, parent_atoms))
698 writemsg("\n!!! The following update has been skipped " + \
699 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
701 writemsg(str(pkg.slot_atom), noiselevel=-1)
702 if pkg.root_config.settings["ROOT"] != "/":
703 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
704 writemsg("\n", noiselevel=-1)
706 for parent, root, atom in parent_atoms:
707 self._show_unsatisfied_dep(root, atom, myparent=parent)
708 writemsg("\n", noiselevel=-1)
711 # These are shown in abbreviated form, in order to avoid terminal
712 # flooding from mask messages as reported in bug #285832.
713 writemsg("\n!!! The following update(s) have been skipped " + \
714 "due to unsatisfied dependencies\n" + \
715 "!!! triggered by backtracking:\n\n", noiselevel=-1)
716 for pkg, parent_atoms in backtrack_masked:
717 writemsg(str(pkg.slot_atom), noiselevel=-1)
718 if pkg.root_config.settings["ROOT"] != "/":
719 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
720 writemsg("\n", noiselevel=-1)
# Display updates skipped because of slot conflicts, listing each
# conflicting parent/atom pair.
# NOTE(review): lines 723, 725-726, 728, 731-732, 737-738, 740-742, 744,
# 750, 752, 754-756 are elided — the `msg = []` initialization and parts
# of the per-parent formatting are not visible in this excerpt.
722 def _show_missed_update_slot_conflicts(self, missed_updates):
724 if not missed_updates:
727 self._show_merge_list()
729 msg.append("\nWARNING: One or more updates have been " + \
730 "skipped due to a dependency conflict:\n\n")
733 for pkg, parent_atoms in missed_updates:
734 msg.append(str(pkg.slot_atom))
735 if pkg.root_config.settings["ROOT"] != "/":
736 msg.append(" for %s" % (pkg.root,))
739 for parent, atom in parent_atoms:
743 msg.append(" conflicts with\n")
745 if isinstance(parent,
746 (PackageArg, AtomArg)):
747 # For PackageArg and AtomArg types, it's
748 # redundant to display the atom attribute.
749 msg.append(str(parent))
751 # Display the specific atom from SetArg or
753 msg.append("%s required by %s" % (atom, parent))
757 writemsg("".join(msg), noiselevel=-1)
# NOTE(review): lines 764-766, 768-769, 771, 774, 777, 779, 781-782,
# 784-786, 803, 807-808 are elided — guards around the explanation output
# and the `msg = []` initializations are not visible in this excerpt.
759 def _show_slot_collision_notice(self):
760 """Show an informational message advising the user to mask one of
761 the packages. In some cases it may be possible to resolve this
762 automatically, but support for backtracking (removal nodes that have
763 already been selected) will be required in order to handle all possible
767 if not self._dynamic_config._slot_collision_info:
770 self._show_merge_list()
# Delegate the detailed conflict description to slot_conflict_handler.
772 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
773 handler = self._dynamic_config._slot_conflict_handler
775 conflict = handler.get_conflict()
776 writemsg(conflict, noiselevel=-1)
778 explanation = handler.get_explanation()
780 writemsg(explanation, noiselevel=-1)
783 if "--quiet" in self._frozen_config.myopts:
787 msg.append("It may be possible to solve this problem ")
788 msg.append("by using package.mask to prevent one of ")
789 msg.append("those packages from being selected. ")
790 msg.append("However, it is also possible that conflicting ")
791 msg.append("dependencies exist such that they are impossible to ")
792 msg.append("satisfy simultaneously. If such a conflict exists in ")
793 msg.append("the dependencies of two different packages, then those ")
794 msg.append("packages can not be installed simultaneously.")
795 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
# Suggest a larger --backtrack only when backtracking is exhausted and
# the user has not already raised the limit to 30 or beyond.
796 if not self._dynamic_config._allow_backtracking and \
797 (backtrack_opt is None or \
798 (backtrack_opt > 0 and backtrack_opt < 30)):
799 msg.append(" You may want to try a larger value of the ")
800 msg.append("--backtrack option, such as --backtrack=30, ")
801 msg.append("in order to see if that will solve this conflict ")
802 msg.append("automatically.")
804 for line in textwrap.wrap(''.join(msg), 70):
805 writemsg(line + '\n', noiselevel=-1)
806 writemsg('\n', noiselevel=-1)
809 msg.append("For more information, see MASKED PACKAGES ")
810 msg.append("section in the emerge man page or refer ")
811 msg.append("to the Gentoo Handbook.")
812 for line in textwrap.wrap(''.join(msg), 70):
813 writemsg(line + '\n', noiselevel=-1)
814 writemsg('\n', noiselevel=-1)
# Dispatch every recorded slot collision to _process_slot_conflict().
# NOTE(review): lines 817 and 822 (the docstring delimiters) are elided.
816 def _process_slot_conflicts(self):
818 If there are any slot conflicts and backtracking is enabled,
819 _complete_graph should complete the graph before this method
820 is called, so that all relevant reverse dependencies are
821 available for use in backtracking decisions.
823 for (slot_atom, root), slot_nodes in \
824 self._dynamic_config._slot_collision_info.items():
825 self._process_slot_conflict(root, slot_atom, slot_nodes)
# NOTE(review): lines 828, 832-833, 835, 839-840, 842-844, 846, 849-850,
# 852, 855, 858, 860-861, 864, 873-874, 876-877, 879, 883, 890, 892 are
# elided — the conflict_atoms/conflict_pkgs/remaining initializations,
# the debug guard, and several continue lines are not visible here.
827 def _process_slot_conflict(self, root, slot_atom, slot_nodes):
829 Process slot conflict data to identify specific atoms which
830 lead to conflict. These atoms only match a subset of the
831 packages that have been pulled into a given slot.
834 debug = "--debug" in self._frozen_config.myopts
# Collect the union of all parent atoms across the conflicting slot
# occupants; each package is then tested against the others' parents.
836 slot_parent_atoms = set()
837 for pkg in slot_nodes:
838 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
841 slot_parent_atoms.update(parent_atoms)
845 for pkg in slot_nodes:
847 if self._dynamic_config._allow_backtracking and \
848 pkg in self._dynamic_config._runtime_pkg_mask:
851 "!!! backtracking loop detected: %s %s\n" % \
853 self._dynamic_config._runtime_pkg_mask[pkg]),
854 level=logging.DEBUG, noiselevel=-1)
856 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
857 if parent_atoms is None:
859 self._dynamic_config._parent_atoms[pkg] = parent_atoms
862 for parent_atom in slot_parent_atoms:
863 if parent_atom in parent_atoms:
865 # Use package set for matching since it will match via
866 # PROVIDE when necessary, while match_from_list does not.
867 parent, atom = parent_atom
868 atom_set = InternalPackageSet(
869 initial_atoms=(atom,), allow_repo=True)
870 if atom_set.findAtomForPackage(pkg,
871 modified_use=self._pkg_use_enabled(pkg)):
872 parent_atoms.add(parent_atom)
875 conflict_atoms.setdefault(parent_atom, set()).add(pkg)
878 conflict_pkgs.append(pkg)
# If backtracking may still help, first try ABI (slot-operator) rebuilds;
# anything left over goes to the generic slot-conflict backtracker.
880 if conflict_pkgs and \
881 self._dynamic_config._allow_backtracking and \
882 not self._accept_blocker_conflicts():
884 for pkg in conflict_pkgs:
885 if self._slot_conflict_backtrack_abi(pkg,
886 slot_nodes, conflict_atoms):
887 backtrack_infos = self._dynamic_config._backtrack_infos
888 config = backtrack_infos.setdefault("config", {})
889 config.setdefault("slot_conflict_abi", set()).add(pkg)
891 remaining.append(pkg)
893 self._slot_confict_backtrack(root, slot_atom,
894 slot_parent_atoms, remaining)
# Schedule a backtracking mask for one of the packages involved in a slot
# conflict, recording the candidate ordering in _backtrack_infos.
# NOTE(review): the method name is misspelled ("confict"); the caller at
# line 893 uses the same spelling, so renaming would need a coordinated
# change. Lines 898, 901, 919, 924, 934, 937, 939, 943-946, 953 are
# elided — the backtrack_data initialization, the `parent_atoms =`
# assignment target, and the debug guard are not visible here.
896 def _slot_confict_backtrack(self, root, slot_atom,
897 all_parents, conflict_pkgs):
899 debug = "--debug" in self._frozen_config.myopts
900 existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
902 # The ordering of backtrack_data can make
903 # a difference here, because both mask actions may lead
904 # to valid, but different, solutions and the one with
905 # 'existing_node' masked is usually the better one. Because
906 # of that, we choose an order such that
907 # the backtracker will first explore the choice with
908 # existing_node masked. The backtracker reverses the
909 # order, so the order it uses is the reverse of the
910 # order shown here. See bug #339606.
911 if existing_node in conflict_pkgs and \
912 existing_node is not conflict_pkgs[-1]:
913 conflict_pkgs.remove(existing_node)
914 conflict_pkgs.append(existing_node)
915 for to_be_masked in conflict_pkgs:
916 # For missed update messages, find out which
917 # atoms matched to_be_selected that did not
918 # match to_be_masked.
920 self._dynamic_config._parent_atoms.get(to_be_masked, set())
921 conflict_atoms = set(parent_atom for parent_atom in all_parents \
922 if parent_atom not in parent_atoms)
923 backtrack_data.append((to_be_masked, conflict_atoms))
925 if len(backtrack_data) > 1:
926 # NOTE: Generally, we prefer to mask the higher
927 # version since this solves common cases in which a
928 # lower version is needed so that all dependencies
929 # will be satisfied (bug #337178). However, if
930 # existing_node happens to be installed then we
931 # mask that since this is a common case that is
932 # triggered when --update is not enabled.
933 if existing_node.installed:
935 elif any(pkg > existing_node for pkg in conflict_pkgs):
936 backtrack_data.reverse()
938 to_be_masked = backtrack_data[-1][0]
940 self._dynamic_config._backtrack_infos.setdefault(
941 "slot conflict", []).append(backtrack_data)
942 self._dynamic_config._need_restart = True
947 msg.append("backtracking due to slot conflict:")
948 msg.append(" first package: %s" % existing_node)
949 msg.append(" package to mask: %s" % to_be_masked)
950 msg.append(" slot: %s" % slot_atom)
951 msg.append(" parents: %s" % ", ".join( \
952 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
954 writemsg_level("".join("%s\n" % l for l in msg),
955 noiselevel=-1, level=logging.DEBUG)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
    """
    If one or more conflict atoms have a slot/sub-slot dep that can be resolved
    by rebuilding the parent package, then schedule the rebuild via
    backtracking, and return True. Otherwise, return False.
    """
    # NOTE(review): the bodies of the guard conditions below (presumably
    # continue statements) and the final return appear elided in this view.
    for parent_atom, conflict_pkgs in conflict_atoms.items():
        parent, atom = parent_atom
        # Only a built parent with a slot-operator (:=) dep can be
        # rebuilt to resolve the conflict.
        if atom.slot_operator != "=" or not parent.built:
        if pkg not in conflict_pkgs:
        for other_pkg in slot_nodes:
            if other_pkg in conflict_pkgs:
            # Probe whether rebuilding parent against other_pkg would
            # resolve the slot/sub-slot mismatch; if so, schedule it.
            dep = Dependency(atom=atom, child=other_pkg,
                parent=parent, root=pkg.root)
            if self._slot_operator_update_probe(dep):
                self._slot_operator_update_backtrack(dep)
def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
    # Schedule backtracking so that packages involved in a slot-operator
    # (:=) dependency are rebuilt/reinstalled against a newer slot/sub-slot.
    # @param dep: the slot-operator Dependency to act on
    # @param new_child_slot: optional replacement child package in a new
    #   slot; when None the existing dep.child is the rebuild target
    # NOTE(review): the else-branch binding child = dep.child, the msg
    # list initialization, and the abi_masks initialization appear to be
    # elided from this view.
    if new_child_slot is None:
    child = new_child_slot

    if "--debug" in self._frozen_config.myopts:
        msg.append("backtracking due to missed slot abi update:")
        msg.append("   child package:  %s" % child)
        if new_child_slot is not None:
            msg.append("   new child slot package:  %s" % new_child_slot)
        msg.append("   parent package: %s" % dep.parent)
        msg.append("   atom: %s" % dep.atom)
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    if new_child_slot is None:
        if not child.installed:
            abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
    if not dep.parent.installed:
        abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
    config.setdefault("slot_operator_mask_built", {}).update(abi_masks)

    # trigger replacement of installed packages if necessary
    abi_reinstalls = set()
    if dep.parent.installed:
        abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
    if new_child_slot is None and child.installed:
        abi_reinstalls.add((child.root, child.slot_atom))
    config.setdefault("slot_operator_replace_installed",
        set()).update(abi_reinstalls)

    # A restart is required for the scheduled masks/reinstalls to apply.
    self._dynamic_config._need_restart = True
def _slot_operator_update_probe(self, dep, new_child_slot=False):
    """
    slot/sub-slot := operators tend to prevent updates from getting pulled in,
    since installed packages pull in packages with the slot/sub-slot that they
    were built against. Detect this case so that we can schedule rebuilds
    and reinstalls when appropriate.
    NOTE: This function only searches for updates that involve upgrades
    to higher versions, since the logic required to detect when a
    downgrade would be desirable is not implemented.
    """
    # NOTE(review): a number of statements (early returns for excluded
    # packages, continue statements under the guard conditions, and the
    # successful-match return) appear to be elided from this view;
    # comments describe only the visible code.

    # Never probe updates for packages the user explicitly excluded.
    if dep.child.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
        modified_use=self._pkg_use_enabled(dep.child)):

    if dep.parent.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
        modified_use=self._pkg_use_enabled(dep.parent)):

    debug = "--debug" in self._frozen_config.myopts
    want_downgrade = None

    for replacement_parent in self._iter_similar_available(dep.parent,
        dep.parent.slot_atom):

        for atom in replacement_parent.validated_atoms:
            # Only := deps on the same category/package are relevant.
            if not atom.slot_operator == "=" or \
                atom.cp != dep.atom.cp:

            # Discard USE deps, we're only searching for an approximate
            # pattern, and dealing with USE states is too complex for
            atom = atom.without_use

            if replacement_parent.built and \
                portage.dep._match_slot(atom, dep.child):
                # Our selected replacement_parent appears to be built
                # for the existing child selection. So, discard this
                # parent and search for another.

            for pkg in self._iter_similar_available(
                if pkg.slot == dep.child.slot and \
                    pkg.sub_slot == dep.child.sub_slot:
                    # If slot/sub-slot is identical, then there's
                    # no point in updating.
                if pkg.slot == dep.child.slot:
                    # the new slot only matters if the
                    # package version is higher
                if pkg.slot != dep.child.slot:
                if want_downgrade is None:
                    want_downgrade = self._downgrade_probe(dep.child)
                    # be careful not to trigger a rebuild when
                    # the only version available with a
                    # different slot_operator is an older version
                    if not want_downgrade:

                # Debug trace for a successful probe (update found).
                msg.append("slot_operator_update_probe:")
                msg.append("   existing child package:  %s" % dep.child)
                msg.append("   existing parent package: %s" % dep.parent)
                msg.append("   new child package:  %s" % pkg)
                msg.append("   new parent package: %s" % replacement_parent)
                writemsg_level("\n".join(msg),
                    noiselevel=-1, level=logging.DEBUG)

    # Debug trace for a failed probe (no suitable update found).
    msg.append("slot_operator_update_probe:")
    msg.append("   existing child package:  %s" % dep.child)
    msg.append("   existing parent package: %s" % dep.parent)
    msg.append("   new child package:  %s" % None)
    msg.append("   new parent package: %s" % None)
    writemsg_level("\n".join(msg),
        noiselevel=-1, level=logging.DEBUG)
def _downgrade_probe(self, pkg):
    """
    Detect cases where a downgrade of the given package is considered
    desirable due to the current version being masked or unavailable.
    """
    # NOTE(review): the atom argument line of the call below and the
    # early "return False" appear to be elided from this view.
    available_pkg = None
    for available_pkg in self._iter_similar_available(pkg,
        if available_pkg >= pkg:
            # There's an available package of the same or higher
            # version, so downgrade seems undesirable.
    # Truthy only if the loop ran and never found a >= version,
    # i.e. only strictly lower versions are available.
    return available_pkg is not None
def _iter_similar_available(self, graph_pkg, atom):
    """
    Given a package that's in the graph, do a rough check to
    see if a similar package is available to install. The given
    graph_pkg itself may be yielded only if it's not installed.
    """
    # NOTE(review): the continue statements under the filters below and
    # the final yield appear to be elided from this view.

    usepkgonly = "--usepkgonly" in self._frozen_config.myopts
    useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'

    for pkg in self._iter_match_pkgs_any(
        graph_pkg.root_config, atom):
        if pkg.cp != graph_pkg.cp:
            # discard old-style virtual match
        # Skip packages already masked by a previous backtracking run.
        if pkg in self._dynamic_config._runtime_pkg_mask:
        if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
        if not self._pkg_visibility_check(pkg):
        if self._equiv_binary_installed(pkg):
        # Unless binary packages are explicitly preferred, also require
        # that an equivalent ebuild is visible.
        if not (not use_ebuild_visibility and
            (usepkgonly or useoldpkg_atoms.findAtomForPackage(
            pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
            not self._equiv_ebuild_visible(pkg):
def _slot_operator_trigger_reinstalls(self):
    """
    Search for packages with slot-operator deps on older slots, and schedule
    rebuilds if they can link to a newer slot that's in the graph.
    """
    # NOTE(review): the continue/break statements under the branches
    # below appear to be elided from this view.

    rebuild_if_new_slot = self._dynamic_config.myparams.get(
        "rebuild_if_new_slot", "y") == "y"

    for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():

        for dep in slot_info:
            # Only built child and built parent packages are candidates
            # for a slot-operator rebuild.
            if not (dep.child.built and dep.parent and
                isinstance(dep.parent, Package) and dep.parent.built):

            # Check for slot update first, since we don't want to
            # trigger reinstall of the child package when a newer
            # slot will be used instead.
            if rebuild_if_new_slot:
                new_child = self._slot_operator_update_probe(dep,
                    new_child_slot=True)
                self._slot_operator_update_backtrack(dep,
                    new_child_slot=new_child)

            # Otherwise, probe for an update within the same slot.
            if self._slot_operator_update_probe(dep):
                self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""
    # NOTE(review): the tail of the binpkg_respect_use expression, the
    # "if flags: return flags" checks, and the final "return None"
    # appear to be elided from this view.

    # binpkg_respect_use: Behave like newuse by default. If newuse is
    # False and changed_use is True, then behave like changed_use.
    binpkg_respect_use = (pkg.built and
        self._dynamic_config.myparams.get("binpkg_respect_use")
    newuse = "--newuse" in self._frozen_config.myopts
    changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")

    if newuse or (binpkg_respect_use and not changed_use):
        # Reinstall on any IUSE change, plus any change in the
        # effective (enabled) flag set, minus forced flags.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
    elif changed_use or binpkg_respect_use:
        # Only changes in the enabled flag set matter here.
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
    # Main graph-construction loop: drain the dependency stacks, adding
    # packages and their deps until both stacks are empty.
    # NOTE(review): the branch selecting between the two stacks and the
    # early "return 0" failure paths appear to be elided from this view.
    dep_stack = self._dynamic_config._dep_stack
    dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
    while dep_stack or dep_disjunctive_stack:
        self._spinner_update()
        dep = dep_stack.pop()
        # A Package on the stack means its deps still need traversal;
        # otherwise the item is a Dependency to resolve.
        if isinstance(dep, Package):
            if not self._add_pkg_deps(dep,
                allow_unsatisfied=allow_unsatisfied):
        if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
        if dep_disjunctive_stack:
            if not self._pop_disjunction(allow_unsatisfied):
def _expand_set_args(self, input_args, add_to_digraph=False):
    """
    Iterate over a list of DependencyArg instances and yield all
    instances given in the input together with additional SetArg
    instances that are generated from nested sets.
    @param input_args: An iterable of DependencyArg instances
    @type input_args: Iterable
    @param add_to_digraph: If True then add SetArg instances
    to the digraph, in order to record parent -> child
    relationships from nested sets
    @type add_to_digraph: Boolean
    @return: All args given in the input together with additional
    SetArg instances that are generated from nested sets
    """
    # NOTE(review): the yield statements, the arg_stack initialization
    # and its while-loop header appear to be elided from this view.

    traversed_set_args = set()

    for arg in input_args:
        if not isinstance(arg, SetArg):

        root_config = arg.root_config
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        arg = arg_stack.pop()
        if arg in traversed_set_args:
        traversed_set_args.add(arg)

        # Anchor the set arg in the graph (no parent) so that
        # parent/child set relationships can be recorded below.
        self._dynamic_config.digraph.add(arg, None,
            priority=BlockerDepPriority.instance)

        # Traverse nested sets and add them to the stack
        # if they're not already in the graph. Also, graph
        # edges between parent and nested sets.
        for token in arg.pset.getNonAtoms():
            if not token.startswith(SETPREFIX):
            s = token[len(SETPREFIX):]
            nested_set = depgraph_sets.sets.get(s)
            if nested_set is None:
                nested_set = root_config.sets.get(s)
            if nested_set is not None:
                nested_arg = SetArg(arg=token, pset=nested_set,
                    root_config=root_config)
                arg_stack.append(nested_arg)
                self._dynamic_config.digraph.add(nested_arg, arg,
                    priority=BlockerDepPriority.instance)
                depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    # Resolve a single Dependency: register blockers, select a package
    # that satisfies the atom, and either add it to the graph or record
    # the dep as unsatisfied (possibly scheduling backtracking).
    # NOTE(review): many statements (return statements, the blocker
    # branch header, msg initialization, writemsg call header) appear
    # to be elided from this view.
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts

    if not buildpkgonly and \
        not dep.collapsed_priority.ignored and \
        not dep.collapsed_priority.optional and \
        dep.parent not in self._dynamic_config._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.

        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            priority=dep.priority, root=dep.parent.root)
        self._dynamic_config._blocker_parents.add(blocker, dep.parent)

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
    # The caller has selected a specific package
    # via self._minimize_packages().
    existing_node = self._dynamic_config._slot_pkg_map[
        dep.root].get(dep_pkg.slot_atom)

    if (dep.collapsed_priority.optional or
        dep.collapsed_priority.ignored):
        # This is an unnecessary build-time dep.
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)

    self._dynamic_config._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))

    # The parent node should not already be in
    # runtime_pkg_mask, since that would trigger an
    # infinite backtracking loop.
    if self._dynamic_config._allow_backtracking:
        if dep.parent in self._dynamic_config._runtime_pkg_mask:
            "!!! backtracking loop detected: %s %s\n" % \
            self._dynamic_config._runtime_pkg_mask[
            dep.parent]), noiselevel=-1)
        elif not self.need_restart():
            # Do not backtrack if only USE have to be changed in
            # order to satisfy the dependency.
            dep_pkg, existing_node = \
                self._select_package(dep.root, dep.atom.without_use,
                onlydeps=dep.onlydeps)
            # Record the missing dep so the backtracker can retry.
            self._dynamic_config._backtrack_infos["missing dependency"] = dep
            self._dynamic_config._need_restart = True
            msg.append("backtracking due to unsatisfied dep:")
            msg.append("    parent: %s" % dep.parent)
            msg.append("  priority: %s" % dep.priority)
            msg.append("      root: %s" % dep.root)
            msg.append("      atom: %s" % dep.atom)
            writemsg_level("".join("%s\n" % l for l in msg),
                noiselevel=-1, level=logging.DEBUG)

    # Track the package for possible --rebuild handling.
    self._rebuild.add(dep_pkg, dep)

    ignore = dep.collapsed_priority.ignored and \
        not self._dynamic_config._traverse_ignored_deps
    if not ignore and not self._add_pkg(dep_pkg, dep):
def _check_slot_conflict(self, pkg, atom):
    # Check whether another package already occupies pkg's slot in the
    # graph. Returns (existing_node, matches) where matches indicates
    # whether the existing node also satisfies the given atom (so it can
    # be reused instead of causing a conflict).
    # NOTE(review): the initial matches assignment and part of the
    # condition continuation appear to be elided from this view.
    existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
    matches = pkg.cpv == existing_node.cpv
    if pkg != existing_node and \
        # Use package set for matching since it will match via
        # PROVIDE when necessary, while match_from_list does not.
        matches = bool(InternalPackageSet(initial_atoms=(atom,),
            allow_repo=True).findAtomForPackage(existing_node,
            modified_use=self._pkg_use_enabled(existing_node)))

    return (existing_node, matches)
def _add_pkg(self, pkg, dep):
    """
    Adds a package to the depgraph, queues dependencies, and handles
    """
    # NOTE(review): this method is heavily elided in this view (several
    # writemsg_level call headers, try/except headers, return statements
    # and branch headers are missing); comments describe only the
    # visible code.
    debug = "--debug" in self._frozen_config.myopts
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()

    # Debug display of the child package and its USE state.
    "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
        pkg_use_display(pkg, self._frozen_config.myopts,
        modified_use=self._pkg_use_enabled(pkg))),
        level=logging.DEBUG, noiselevel=-1)
    if isinstance(myparent,
        (PackageArg, AtomArg)):
        # For PackageArg and AtomArg types, it's
        # redundant to display the atom attribute.
        "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
        level=logging.DEBUG, noiselevel=-1)
    # Display the specific atom from SetArg or
    if dep.atom is not dep.atom.unevaluated_atom:
        uneval = " (%s)" % (dep.atom.unevaluated_atom,)
    "%s%s%s required by %s\n" %
        ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
        level=logging.DEBUG, noiselevel=-1)

    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]

    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString as e:
        if not pkg.installed:
            # should have been masked before it was selected

    # NOTE: REQUIRED_USE checks are delayed until after
    # package selection, since we want to prompt the user
    # for USE adjustment rather than have REQUIRED_USE
    # affect package selection and || dep choices.
    if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
        eapi_has_required_use(pkg.metadata["EAPI"]):
        required_use_is_sat = check_required_use(
            pkg.metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag,
            eapi=pkg.metadata["EAPI"])
        if not required_use_is_sat:
            # Record all parent relationships so that the failure
            # display can show who pulled this package in.
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))

            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)

            atom = Atom("=" + pkg.cpv)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                {"myparent" : dep.parent, "show_req_use" : pkg}))
            self._dynamic_config._skip_restart = True

    if not pkg.onlydeps:

        existing_node, existing_node_matches = \
            self._check_slot_conflict(pkg, dep.atom)
        slot_collision = False

        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self._dynamic_config.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))

        # The existing node does not match: record a slot conflict.
        self._add_slot_conflict(pkg)
        "%s%s %s\n" % ("Slot Conflict:".ljust(15),
            existing_node, pkg_use_display(existing_node,
            self._frozen_config.myopts,
            modified_use=self._pkg_use_enabled(existing_node))),
            level=logging.DEBUG, noiselevel=-1)

        slot_collision = True

        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self._dynamic_config.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.

        elif not previously_added:
            # First time this slot is filled: record the selection and
            # invalidate caches that depend on the package set.
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
            except portage.exception.InvalidDependString:
                if not pkg.installed:
                    # should have been masked before it was selected

    self._dynamic_config._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self._dynamic_config.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)

    # This section determines whether we go deeper into dependencies or not.
    # We want to go deeper on a few occasions:
    # Installing package A, we need to make sure package A's deps are met.
    # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    if arg_atoms and depth > 0:
        for parent, atom in arg_atoms:
            if parent.reset_depth:

    if previously_added and pkg.depth is not None:
        depth = min(pkg.depth, depth)

    deep = self._dynamic_config.myparams.get("deep", 0)
    update = "--update" in self._frozen_config.myopts

    dep.want_update = (not self._dynamic_config._complete_mode and
        (arg_atoms or update) and
        not (deep is not True and depth > deep))

    # Built packages with a := dep need slot-operator tracking so that
    # rebuilds can be scheduled if the child's slot changes.
    if (not pkg.onlydeps and pkg.built and
        dep.atom and dep.atom.slot_operator_built):
        self._add_slot_operator_dep(dep)

    recurse = deep is True or depth + 1 <= deep
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
    elif pkg.installed and not recurse:
        dep_stack = self._dynamic_config._ignored_deps

    self._spinner_update()

    if not previously_added:
        dep_stack.append(pkg)
def _check_masks(self, pkg):
    """
    Record a missed-update notice when a newer version in pkg's slot is
    masked solely because of a LICENSE change.
    """
    key = (pkg.root, pkg.slot_atom)

    # _highest_license_masked maps (root, slot_atom) to the highest
    # version whose only mask reason is its LICENSE; anything else in
    # the mapping's absence means no license-only masked upgrade exists.
    masked_update = self._frozen_config._highest_license_masked.get(key)
    if masked_update is None:
        return
    if pkg < masked_update:
        self._dynamic_config._masked_license_updates.add(masked_update)
def _add_parent_atom(self, pkg, parent_atom):
    """
    Register a (parent, atom) pair as a reason that pkg is in the graph,
    creating the per-package set on first use.
    """
    registry = self._dynamic_config._parent_atoms
    registry.setdefault(pkg, set()).add(parent_atom)
def _add_slot_operator_dep(self, dep):
    # Track a slot-operator (:=) dependency, keyed by the child's
    # (root, slot_atom), so rebuilds can be scheduled later by
    # _slot_operator_trigger_reinstalls.
    # NOTE(review): the initialization of slot_info (presumably an empty
    # list) appears to be elided from this view.
    slot_key = (dep.root, dep.child.slot_atom)
    slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
    if slot_info is None:
        self._dynamic_config._slot_operator_deps[slot_key] = slot_info
    slot_info.append(dep)
def _add_slot_conflict(self, pkg):
    # Record pkg as a slot-collision node and group it with the package
    # currently occupying the same (slot_atom, root) in the graph.
    # NOTE(review): the initialization of slot_nodes (presumably an
    # empty set) appears to be elided from this view.
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
    # Include the package already selected for this slot.
    slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
    self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Parse pkg's DEPEND/RDEPEND/PDEPEND strings, apply option-dependent
    # filtering (--buildpkgonly, --with-bdeps, --root-deps), and queue
    # the resulting dependencies for resolution.
    # NOTE(review): several statements are elided in this view (myroot
    # assignment, the edepend dict initialization and its loop header,
    # else branches, the deps tuple header, try headers and returns);
    # comments describe only the visible code.

    metadata = pkg.metadata
    removal_action = "remove" in self._dynamic_config.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        # Only build-time deps matter when nothing will be merged.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            ignore_build_time_deps = True

    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        ignore_build_time_deps = True

    depend_root = myroot
    depend_root = self._frozen_config._running_root.root
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            ignore_build_time_deps = True

    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if ignore_build_time_deps and \
        not self._rebuild.rebuild:
        edepend["DEPEND"] = ""

    # (root, depstring, priority) triples to process below.
        (depend_root, edepend["DEPEND"],
        self._priority(buildtime=True,
            optional=(pkg.built or ignore_build_time_deps),
            ignored=ignore_build_time_deps)),
        (myroot, edepend["RDEPEND"],
        self._priority(runtime=True)),
        (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True))

    debug = "--debug" in self._frozen_config.myopts

    for dep_root, dep_string, dep_priority in deps:
            writemsg_level("\nParent:    %s\n" % (pkg,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Depstring: %s\n" % (dep_string,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Priority:  %s\n" % (dep_priority,),
                noiselevel=-1, level=logging.DEBUG)

            # Parse the raw dep string with USE conditionals evaluated
            # against the package's enabled flags.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg),
                is_valid_flag=pkg.iuse.is_valid_flag,
                opconvert=True, token_class=Atom,
                eapi=pkg.metadata['EAPI'])
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected

            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
                dep_string = portage.dep.use_reduce(dep_string,
                    uselist=self._pkg_use_enabled(pkg),
                    opconvert=True, token_class=Atom,
                    eapi=pkg.metadata['EAPI'])
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

            # Queue any || (disjunctive) deps for later resolution.
            dep_string = list(self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)
            # should have been masked before it was selected

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,

    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    # Wrapper around _wrapped_add_pkg_dep_string that disables
    # autounmask while processing optional/ignored deps, restoring the
    # previous setting afterwards.
    # NOTE(review): the signature continuation (allow_unsatisfied) and
    # the try/finally lines surrounding the call/restore appear to be
    # elided from this view.
    _autounmask_backup = self._dynamic_config._autounmask
    if dep_priority.optional or dep_priority.ignored:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    return self._wrapped_add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string,
    self._dynamic_config._autounmask = _autounmask_backup
1810 def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
1811 dep_string, allow_unsatisfied):
1812 depth = pkg.depth + 1
1813 deep = self._dynamic_config.myparams.get("deep", 0)
1814 recurse_satisfied = deep is True or depth <= deep
1815 debug = "--debug" in self._frozen_config.myopts
1816 strict = pkg.type_name != "installed"
1819 writemsg_level("\nParent: %s\n" % (pkg,),
1820 noiselevel=-1, level=logging.DEBUG)
1821 dep_repr = portage.dep.paren_enclose(dep_string,
1822 unevaluated_atom=True, opconvert=True)
1823 writemsg_level("Depstring: %s\n" % (dep_repr,),
1824 noiselevel=-1, level=logging.DEBUG)
1825 writemsg_level("Priority: %s\n" % (dep_priority,),
1826 noiselevel=-1, level=logging.DEBUG)
1829 selected_atoms = self._select_atoms(dep_root,
1830 dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
1831 strict=strict, priority=dep_priority)
1832 except portage.exception.InvalidDependString:
1834 self._dynamic_config._masked_installed.add(pkg)
1837 # should have been masked before it was selected
1841 writemsg_level("Candidates: %s\n" % \
1842 ([str(x) for x in selected_atoms[pkg]],),
1843 noiselevel=-1, level=logging.DEBUG)
1845 root_config = self._frozen_config.roots[dep_root]
1846 vardb = root_config.trees["vartree"].dbapi
1847 traversed_virt_pkgs = set()
1849 reinstall_atoms = self._frozen_config.reinstall_atoms
1850 for atom, child in self._minimize_children(
1851 pkg, dep_priority, root_config, selected_atoms[pkg]):
1853 # If this was a specially generated virtual atom
1854 # from dep_check, map it back to the original, in
1855 # order to avoid distortion in places like display
1856 # or conflict resolution code.
1857 is_virt = hasattr(atom, '_orig_atom')
1858 atom = getattr(atom, '_orig_atom', atom)
1860 if atom.blocker and \
1861 (dep_priority.optional or dep_priority.ignored):
1862 # For --with-bdeps, ignore build-time only blockers
1863 # that originate from built packages.
1866 mypriority = dep_priority.copy()
1867 if not atom.blocker:
1868 inst_pkgs = [inst_pkg for inst_pkg in
1869 reversed(vardb.match_pkgs(atom))
1870 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1871 modified_use=self._pkg_use_enabled(inst_pkg))]
1873 for inst_pkg in inst_pkgs:
1874 if self._pkg_visibility_check(inst_pkg):
1876 mypriority.satisfied = inst_pkg
1878 if not mypriority.satisfied:
1879 # none visible, so use highest
1880 mypriority.satisfied = inst_pkgs[0]
1882 dep = Dependency(atom=atom,
1883 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1884 priority=mypriority, root=dep_root)
1886 # In some cases, dep_check will return deps that shouldn't
1887 # be proccessed any further, so they are identified and
1888 # discarded here. Try to discard as few as possible since
1889 # discarded dependencies reduce the amount of information
1890 # available for optimization of merge order.
1892 if not atom.blocker and \
1893 not recurse_satisfied and \
1894 mypriority.satisfied and \
1895 mypriority.satisfied.visible and \
1896 dep.child is not None and \
1897 not dep.child.installed and \
1898 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1899 dep.child.slot_atom) is None:
1902 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
1903 except InvalidDependString:
1904 if not dep.child.installed:
1908 # Existing child selection may not be valid unless
1909 # it's added to the graph immediately, since "complete"
1910 # mode may select a different child later.
1913 self._dynamic_config._ignored_deps.append(dep)
1916 if dep_priority.ignored and \
1917 not self._dynamic_config._traverse_ignored_deps:
1918 if is_virt and dep.child is not None:
1919 traversed_virt_pkgs.add(dep.child)
1921 self._dynamic_config._ignored_deps.append(dep)
1923 if not self._add_dep(dep,
1924 allow_unsatisfied=allow_unsatisfied):
1926 if is_virt and dep.child is not None:
1927 traversed_virt_pkgs.add(dep.child)
1929 selected_atoms.pop(pkg)
1931 # Add selected indirect virtual deps to the graph. This
1932 # takes advantage of circular dependency avoidance that's done
1933 # by dep_zapdeps. We preserve actual parent/child relationships
1934 # here in order to avoid distorting the dependency graph like
1935 # <=portage-2.1.6.x did.
1936 for virt_dep, atoms in selected_atoms.items():
1938 virt_pkg = virt_dep.child
1939 if virt_pkg not in traversed_virt_pkgs:
1943 writemsg_level("\nCandidates: %s: %s\n" % \
1944 (virt_pkg.cpv, [str(x) for x in atoms]),
1945 noiselevel=-1, level=logging.DEBUG)
1947 if not dep_priority.ignored or \
1948 self._dynamic_config._traverse_ignored_deps:
1950 inst_pkgs = [inst_pkg for inst_pkg in
1951 reversed(vardb.match_pkgs(virt_dep.atom))
1952 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1953 modified_use=self._pkg_use_enabled(inst_pkg))]
1955 for inst_pkg in inst_pkgs:
1956 if self._pkg_visibility_check(inst_pkg):
1958 virt_dep.priority.satisfied = inst_pkg
1960 if not virt_dep.priority.satisfied:
1961 # none visible, so use highest
1962 virt_dep.priority.satisfied = inst_pkgs[0]
1964 if not self._add_pkg(virt_pkg, virt_dep):
1967 for atom, child in self._minimize_children(
1968 pkg, self._priority(runtime=True), root_config, atoms):
1970 # If this was a specially generated virtual atom
1971 # from dep_check, map it back to the original, in
1972 # order to avoid distortion in places like display
1973 # or conflict resolution code.
1974 is_virt = hasattr(atom, '_orig_atom')
1975 atom = getattr(atom, '_orig_atom', atom)
1977 # This is a GLEP 37 virtual, so its deps are all runtime.
1978 mypriority = self._priority(runtime=True)
1979 if not atom.blocker:
1980 inst_pkgs = [inst_pkg for inst_pkg in
1981 reversed(vardb.match_pkgs(atom))
1982 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1983 modified_use=self._pkg_use_enabled(inst_pkg))]
1985 for inst_pkg in inst_pkgs:
1986 if self._pkg_visibility_check(inst_pkg):
1988 mypriority.satisfied = inst_pkg
1990 if not mypriority.satisfied:
1991 # none visible, so use highest
1992 mypriority.satisfied = inst_pkgs[0]
1994 # Dependencies of virtuals are considered to have the
1995 # same depth as the virtual itself.
1996 dep = Dependency(atom=atom,
1997 blocker=atom.blocker, child=child, depth=virt_dep.depth,
1998 parent=virt_pkg, priority=mypriority, root=dep_root,
1999 collapsed_parent=pkg, collapsed_priority=dep_priority)
2002 if not atom.blocker and \
2003 not recurse_satisfied and \
2004 mypriority.satisfied and \
2005 mypriority.satisfied.visible and \
2006 dep.child is not None and \
2007 not dep.child.installed and \
2008 self._dynamic_config._slot_pkg_map[dep.child.root].get(
2009 dep.child.slot_atom) is None:
2012 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
2013 except InvalidDependString:
2014 if not dep.child.installed:
2020 self._dynamic_config._ignored_deps.append(dep)
2023 if dep_priority.ignored and \
2024 not self._dynamic_config._traverse_ignored_deps:
2025 if is_virt and dep.child is not None:
2026 traversed_virt_pkgs.add(dep.child)
2028 self._dynamic_config._ignored_deps.append(dep)
2030 if not self._add_dep(dep,
2031 allow_unsatisfied=allow_unsatisfied):
2033 if is_virt and dep.child is not None:
2034 traversed_virt_pkgs.add(dep.child)
2037 writemsg_level("\nExiting... %s\n" % (pkg,),
2038 noiselevel=-1, level=logging.DEBUG)
2042 def _minimize_children(self, parent, priority, root_config, atoms):
2044 Selects packages to satisfy the given atoms, and minimizes the
2045 number of selected packages. This serves to identify and eliminate
2046 redundant package selections when multiple atoms happen to specify
2056 dep_pkg, existing_node = self._select_package(
2057 root_config.root, atom)
2061 atom_pkg_map[atom] = dep_pkg
2063 if len(atom_pkg_map) < 2:
2064 for item in atom_pkg_map.items():
2070 for atom, pkg in atom_pkg_map.items():
2071 pkg_atom_map.setdefault(pkg, set()).add(atom)
2072 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
2074 for pkgs in cp_pkg_map.values():
2077 for atom in pkg_atom_map[pkg]:
2081 # Use a digraph to identify and eliminate any
2082 # redundant package selections.
2083 atom_pkg_graph = digraph()
2086 for atom in pkg_atom_map[pkg1]:
2088 atom_pkg_graph.add(pkg1, atom)
2089 atom_set = InternalPackageSet(initial_atoms=(atom,),
2094 if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
2095 atom_pkg_graph.add(pkg2, atom)
2098 eliminate_pkg = True
2099 for atom in atom_pkg_graph.parent_nodes(pkg):
2100 if len(atom_pkg_graph.child_nodes(atom)) < 2:
2101 eliminate_pkg = False
2104 atom_pkg_graph.remove(pkg)
2106 # Yield ~, =*, < and <= atoms first, since those are more likely to
2107 # cause slot conflicts, and we want those atoms to be displayed
2108 # in the resulting slot conflict message (see bug #291142).
2109 # Give similar treatment to slot/sub-slot atoms.
2113 for atom in cp_atoms:
2114 if atom.slot_operator_built:
2115 abi_atoms.append(atom)
2118 for child_pkg in atom_pkg_graph.child_nodes(atom):
2119 existing_node, matches = \
2120 self._check_slot_conflict(child_pkg, atom)
2121 if existing_node and not matches:
2125 conflict_atoms.append(atom)
2127 normal_atoms.append(atom)
2129 for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
2130 child_pkgs = atom_pkg_graph.child_nodes(atom)
2131 # if more than one child, yield highest version
2132 if len(child_pkgs) > 1:
2134 yield (atom, child_pkgs[-1])
    def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
        """
        Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
        Yields non-disjunctive deps. Raises InvalidDependString when
        necessary.
        """
        for x in dep_struct:
            if isinstance(x, list):
                if x and x[0] == "||":
                    # Explicit any-of group: defer the whole group.
                    self._queue_disjunction(pkg, dep_root, dep_priority, [x])
                    # Nested plain list: recurse and re-yield its
                    # non-disjunctive members.
                    for y in self._queue_disjunctive_deps(
                        pkg, dep_root, dep_priority, x):
                # Note: Eventually this will check for PROPERTIES=virtual
                # or whatever other metadata gets implemented for this
                # purpose.
                if x.cp.startswith('virtual/'):
                    self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2159 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2160 self._dynamic_config._dep_disjunctive_stack.append(
2161 (pkg, dep_root, dep_priority, dep_struct))
    def _pop_disjunction(self, allow_unsatisfied):
        """
        Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
        populate self._dynamic_config._dep_stack.
        """
        # Stack entries are the tuples pushed by _queue_disjunction().
        pkg, dep_root, dep_priority, dep_struct = \
            self._dynamic_config._dep_disjunctive_stack.pop()
        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
2175 def _priority(self, **kwargs):
2176 if "remove" in self._dynamic_config.myparams:
2177 priority_constructor = UnmergeDepPriority
2179 priority_constructor = DepPriority
2180 return priority_constructor(**kwargs)
    def _dep_expand(self, root_config, atom_without_category):
        """
        Expand a category-less atom into the full atoms it could refer to,
        by scanning every category of every configured package database.

        @param root_config: a root config instance
        @type root_config: RootConfig
        @param atom_without_category: an atom without a category component
        @type atom_without_category: String
        @rtype: list
        @return: a list of atoms containing categories (possibly empty)
        """
        # Insert a dummy "null" category just to split out the package name.
        null_cp = portage.dep_getkey(insert_category_into_atom(
            atom_without_category, "null"))
        cat, atom_pn = portage.catsplit(null_cp)

        dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
        # Collect every category that contains a package named atom_pn.
        for db, pkg_type, built, installed, db_keys in dbs:
            for cat in db.categories:
                if db.cp_list("%s/%s" % (cat, atom_pn)):
        # Re-insert each matching category into the original atom.
        for cat in categories:
            deps.append(Atom(insert_category_into_atom(
                atom_without_category, cat), allow_repo=True))
    def _have_new_virt(self, root, atom_cp):
        """
        Scan the filtered-tree package databases for the given root,
        checking whether any of them lists a package matching atom_cp
        (used to detect availability of a new-style virtual).
        """
        for db, pkg_type, built, installed, db_keys in \
            self._dynamic_config._filtered_trees[root]["dbs"]:
            if db.cp_list(atom_cp):
    def _iter_atoms_for_pkg(self, pkg):
        """
        Iterate over the argument atoms (from the depgraph sets) that
        match the given package, skipping atoms that are better served
        by a new-style virtual or by a visible package in a higher slot.
        """
        depgraph_sets = self._dynamic_config.sets[pkg.root]
        atom_arg_map = depgraph_sets.atom_arg_map
        for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
            # Old-style virtual match superseded by a new-style virtual.
            if atom.cp != pkg.cp and \
                self._have_new_virt(pkg.root, atom.cp):
                self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
            visible_pkgs.reverse() # descending order
            # Look for a visible, higher-versioned match in another slot.
            for visible_pkg in visible_pkgs:
                if visible_pkg.cp != atom.cp:
                if pkg >= visible_pkg:
                    # This is descending order, and we're not
                    # interested in any versions <= pkg given.
                if pkg.slot_atom != visible_pkg.slot_atom:
                    higher_slot = visible_pkg
            if higher_slot is not None:
            for arg in atom_arg_map[(atom, pkg.root)]:
                # PackageArg instances only ever match their own package.
                if isinstance(arg, PackageArg) and \
    def select_files(self, myfiles):
        """Given a list of .tbz2s, .ebuilds sets, and deps, populate
        self._dynamic_config._initial_arg_list and call self._resolve to create the
        appropriate depgraph and return a favorite list."""
        debug = "--debug" in self._frozen_config.myopts
        root_config = self._frozen_config.roots[self._frozen_config.target_root]
        sets = root_config.sets
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        eroot = root_config.root
        root = root_config.settings['ROOT']
        vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
        real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
        portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
        bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
        pkgsettings = self._frozen_config.pkgsettings[eroot]
        onlydeps = "--onlydeps" in self._frozen_config.myopts
            # Classify each argument by extension / path / set / atom.
            ext = os.path.splitext(x)[1]
                # Binary package (.tbz2) argument: resolve it to a real
                # path inside PKGDIR if the literal path doesn't exist.
                if not os.path.exists(x):
                        os.path.join(pkgsettings["PKGDIR"], "All", x)):
                        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                    elif os.path.exists(
                        os.path.join(pkgsettings["PKGDIR"], x)):
                        x = os.path.join(pkgsettings["PKGDIR"], x)
                        writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
                        writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
                        return 0, myfavorites
                mytbz2=portage.xpak.tbz2(x)
                mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
                # The tbz2 must live at the canonical PKGDIR location.
                if os.path.realpath(x) != \
                    os.path.realpath(bindb.bintree.getname(mykey)):
                    writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
                    self._dynamic_config._skip_restart = True
                    return 0, myfavorites

                pkg = self._pkg(mykey, "binary", root_config,
                args.append(PackageArg(arg=x, package=pkg,
                    root_config=root_config))
            elif ext==".ebuild":
                # Raw .ebuild path argument: derive cp and cpv from the
                # repository directory layout.
                ebuild_path = portage.util.normalize_path(os.path.abspath(x))
                pkgdir = os.path.dirname(ebuild_path)
                tree_root = os.path.dirname(os.path.dirname(pkgdir))
                cp = pkgdir[len(tree_root)+1:]
                e = portage.exception.PackageNotFound(
                    ("%s is not in a valid portage tree " + \
                    "hierarchy or does not exist") % x)
                if not portage.isvalidatom(cp):
                cat = portage.catsplit(cp)[0]
                mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
                if not portage.isvalidatom("="+mykey):
                ebuild_path = portdb.findname(mykey)
                    # The ebuild must be the one the tree would use.
                    if ebuild_path != os.path.join(os.path.realpath(tree_root),
                        cp, os.path.basename(ebuild_path)):
                        writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
                        self._dynamic_config._skip_restart = True
                        return 0, myfavorites
                    # Warn (with delay) when emerging a masked ebuild.
                    if mykey not in portdb.xmatch(
                        "match-visible", portage.cpv_getkey(mykey)):
                        writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
                        writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
                        writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
                        countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
                    raise portage.exception.PackageNotFound(
                        "%s is not in a valid portage tree hierarchy or does not exist" % x)
                pkg = self._pkg(mykey, "ebuild", root_config,
                    onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
                    os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
                args.append(PackageArg(arg=x, package=pkg,
                    root_config=root_config))
            elif x.startswith(os.path.sep):
                # Absolute path: resolve to the owning package later.
                if not x.startswith(eroot):
                    portage.writemsg(("\n\n!!! '%s' does not start with" + \
                        " $EROOT.\n") % x, noiselevel=-1)
                    self._dynamic_config._skip_restart = True
                # Queue these up since it's most efficient to handle
                # multiple files in a single iter_owners() call.
                lookup_owners.append(x)
            elif x.startswith("." + os.sep) or \
                x.startswith(".." + os.sep):
                # Relative path: normalize, then queue for owner lookup.
                f = os.path.abspath(x)
                if not f.startswith(eroot):
                    portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
                        " $EROOT.\n") % (f, x), noiselevel=-1)
                    self._dynamic_config._skip_restart = True
                lookup_owners.append(f)
                if x in ("system", "world"):
                if x.startswith(SETPREFIX):
                    # Package set argument.
                    s = x[len(SETPREFIX):]
                        raise portage.exception.PackageSetNotFound(s)
                    if s in depgraph_sets.sets:
                    depgraph_sets.sets[s] = pset
                    args.append(SetArg(arg=x, pset=pset,
                        root_config=root_config))
                if not is_valid_package_atom(x, allow_repo=True):
                    portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                    portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                    portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                    self._dynamic_config._skip_restart = True
                # Don't expand categories or old-style virtuals here unless
                # necessary. Expansion of old-style virtuals here causes at
                # least the following problems:
                #   1) It's more difficult to determine which set(s) an atom
                #      came from, if any.
                #   2) It takes away freedom from the resolver to choose other
                #      possible expansions when necessary.
                    args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
                        root_config=root_config))
                expanded_atoms = self._dep_expand(root_config, x)
                installed_cp_set = set()
                for atom in expanded_atoms:
                    if vardb.cp_list(atom.cp):
                        installed_cp_set.add(atom.cp)

                # Prefer the single non-virtual cp among installed matches.
                if len(installed_cp_set) > 1:
                    non_virtual_cps = set()
                    for atom_cp in installed_cp_set:
                        if not atom_cp.startswith("virtual/"):
                            non_virtual_cps.add(atom_cp)
                    if len(non_virtual_cps) == 1:
                        installed_cp_set = non_virtual_cps

                if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                    installed_cp = next(iter(installed_cp_set))
                    for atom in expanded_atoms:
                        if atom.cp == installed_cp:
                            # Keep the installed cp only if a non-installed
                            # (upgradable) match is also available.
                            for pkg in self._iter_match_pkgs_any(
                                root_config, atom.without_use,
                                if not pkg.installed:
                                expanded_atoms = [atom]

                # If a non-virtual package and one or more virtual packages
                # are in expanded_atoms, use the non-virtual package.
                if len(expanded_atoms) > 1:
                    number_of_virtuals = 0
                    for expanded_atom in expanded_atoms:
                        if expanded_atom.cp.startswith("virtual/"):
                            number_of_virtuals += 1
                            candidate = expanded_atom
                    if len(expanded_atoms) - number_of_virtuals == 1:
                        expanded_atoms = [ candidate ]

                # Still ambiguous: report and bail out.
                if len(expanded_atoms) > 1:
                    writemsg("\n\n", noiselevel=-1)
                    ambiguous_package_name(x, expanded_atoms, root_config,
                        self._frozen_config.spinner, self._frozen_config.myopts)
                    self._dynamic_config._skip_restart = True
                    return False, myfavorites
                    atom = expanded_atoms[0]
                    # No match at all: fall back to a virtual/ atom when a
                    # PROVIDE-based virtual of that name exists.
                    null_atom = Atom(insert_category_into_atom(x, "null"),
                    cat, atom_pn = portage.catsplit(null_atom.cp)
                    virts_p = root_config.settings.get_virts_p().get(atom_pn)
                        # Allow the depgraph to choose which virtual.
                        atom = Atom(null_atom.replace('null/', 'virtual/', 1),

                if atom.use and atom.use.conditional:
                    writemsg(
                        ("\n\n!!! '%s' contains a conditional " + \
                        "which is not allowed.\n") % (x,), noiselevel=-1)
                    writemsg("!!! Please check ebuild(5) for full details.\n")
                    self._dynamic_config._skip_restart = True

                args.append(AtomArg(arg=x, atom=atom,
                    root_config=root_config))

            # Resolve queued paths to their owning installed packages.
            search_for_multiple = False
            if len(lookup_owners) > 1:
                search_for_multiple = True

            for x in lookup_owners:
                if not search_for_multiple and os.path.isdir(x):
                    search_for_multiple = True
                relative_paths.append(x[len(root)-1:])

            for pkg, relative_path in \
                real_vardb._owners.iter_owners(relative_paths):
                owners.add(pkg.mycpv)
                if not search_for_multiple:

                portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                    "by any package.\n") % lookup_owners[0], noiselevel=-1)
                self._dynamic_config._skip_restart = True

                slot = vardb.aux_get(cpv, ["SLOT"])[0]
                    # portage now masks packages with missing slot, but it's
                    # possible that one was installed by an older version
                    atom = Atom(portage.cpv_getkey(cpv))
                    atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
                args.append(AtomArg(arg=atom, atom=atom,
                    root_config=root_config))

        if "--update" in self._frozen_config.myopts:
            # In some cases, the greedy slots behavior can pull in a slot that
            # the user would want to uninstall due to it being blocked by a
            # newer version in a different slot. Therefore, it's necessary to
            # detect and discard any that should be uninstalled. Each time
            # that arguments are updated, package selections are repeated in
            # order to ensure consistency with the current arguments:
            #
            #  1) Initialize args
            #  2) Select packages and generate initial greedy atoms
            #  3) Update args with greedy atoms
            #  4) Select packages and generate greedy atoms again, while
            #     accounting for any blockers between selected packages
            #  5) Update args with revised greedy atoms

            self._set_args(args)
                greedy_args.append(arg)
                if not isinstance(arg, AtomArg):
                for atom in self._greedy_slots(arg.root_config, arg.atom):
                        AtomArg(arg=arg.arg, atom=atom,
                            root_config=arg.root_config))

            self._set_args(greedy_args)

            # Revise greedy atoms, accounting for any blockers
            # between selected packages.
            revised_greedy_args = []
                revised_greedy_args.append(arg)
                if not isinstance(arg, AtomArg):
                for atom in self._greedy_slots(arg.root_config, arg.atom,
                    blocker_lookahead=True):
                    revised_greedy_args.append(
                        AtomArg(arg=arg.arg, atom=atom,
                            root_config=arg.root_config))
            args = revised_greedy_args
            del revised_greedy_args

        args.extend(self._gen_reinstall_sets())
        self._set_args(args)

        # Favorites: atoms from atom/package args and names of
        # non-internal sets.
        myfavorites = set(myfavorites)
            if isinstance(arg, (AtomArg, PackageArg)):
                myfavorites.add(arg.atom)
            elif isinstance(arg, SetArg):
                if not arg.internal:
                    myfavorites.add(arg.arg)
        myfavorites = list(myfavorites)

            portage.writemsg("\n", noiselevel=-1)
        # Order needs to be preserved since a feature of --nodeps
        # is to allow the user to force a specific merge order.
        self._dynamic_config._initial_arg_list = args[:]

        return self._resolve(myfavorites)
    def _gen_reinstall_sets(self):
        """
        Yield internal SetArg instances for the rebuild, reinstall and
        slot-operator-replace atom lists, grouped per (root, set name).
        """
        # Tag each atom with the internal set it belongs to.
        for root, atom in self._rebuild.rebuild_list:
            atom_list.append((root, '__auto_rebuild__', atom))
        for root, atom in self._rebuild.reinstall_list:
            atom_list.append((root, '__auto_reinstall__', atom))
        for root, atom in self._dynamic_config._slot_operator_replace_installed:
            atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))

        # Group atoms per (root, set_name) pair.
        for root, set_name, atom in atom_list:
            set_dict.setdefault((root, set_name), []).append(atom)

        for (root, set_name), atoms in set_dict.items():
            yield SetArg(arg=(SETPREFIX + set_name),
                # Set reset_depth=False here, since we don't want these
                # special sets to interact with depth calculations (see
                # the emerge --deep=DEPTH option), though we want them
                # to behave like normal arguments in most other respects.
                pset=InternalPackageSet(initial_atoms=atoms),
                force_reinstall=True,
                root_config=self._frozen_config.roots[root])
    def _resolve(self, myfavorites):
        """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
        call self._creategraph to process theier deps and return
        a favorite list."""
        debug = "--debug" in self._frozen_config.myopts
        onlydeps = "--onlydeps" in self._frozen_config.myopts
        myroot = self._frozen_config.target_root
        pkgsettings = self._frozen_config.pkgsettings[myroot]
        pprovideddict = pkgsettings.pprovideddict
        virtuals = pkgsettings.getvirtuals()
        args = self._dynamic_config._initial_arg_list[:]
        for arg in self._expand_set_args(args, add_to_digraph=True):
            for atom in arg.pset.getAtoms():
                self._spinner_update()
                dep = Dependency(atom=atom, onlydeps=onlydeps,
                    root=myroot, parent=arg)
                    # package.provided satisfies this atom directly.
                    pprovided = pprovideddict.get(atom.cp)
                    if pprovided and portage.match_from_list(atom, pprovided):
                        # A provided package has been specified on the command line.
                        self._dynamic_config._pprovided_args.append((arg, atom))
                    if isinstance(arg, PackageArg):
                        # Explicit package argument: add it and build the
                        # graph immediately.
                        if not self._add_pkg(arg.package, dep) or \
                            not self._create_graph():
                            if not self.need_restart():
                                sys.stderr.write(("\n\n!!! Problem " + \
                                    "resolving dependencies for %s\n") % \
                            return 0, myfavorites
                        writemsg_level("\n      Arg: %s\n     Atom: %s\n" %
                            (arg, atom), noiselevel=-1, level=logging.DEBUG)
                    pkg, existing_node = self._select_package(
                        myroot, atom, onlydeps=onlydeps)
                        # No package selected: check whether an old-style
                        # virtual expansion is satisfied by package.provided.
                        pprovided_match = False
                        for virt_choice in virtuals.get(atom.cp, []):
                            expanded_atom = portage.dep.Atom(
                                atom.replace(atom.cp, virt_choice.cp, 1))
                            pprovided = pprovideddict.get(expanded_atom.cp)
                                portage.match_from_list(expanded_atom, pprovided):
                                # A provided package has been
                                # specified on the command line.
                                self._dynamic_config._pprovided_args.append((arg, atom))
                                pprovided_match = True

                        # Unsatisfied: hard failure unless the atom came
                        # from the selected/system/world sets.
                        if not (isinstance(arg, SetArg) and \
                            arg.name in ("selected", "system", "world")):
                            self._dynamic_config._unsatisfied_deps_for_display.append(
                                ((myroot, atom), {"myparent" : arg}))
                            return 0, myfavorites

                        self._dynamic_config._missing_args.append((arg, atom))
                    if atom.cp != pkg.cp:
                        # For old-style virtuals, we need to repeat the
                        # package.provided check against the selected package.
                        expanded_atom = atom.replace(atom.cp, pkg.cp)
                        pprovided = pprovideddict.get(pkg.cp)
                            portage.match_from_list(expanded_atom, pprovided):
                            # A provided package has been
                            # specified on the command line.
                            self._dynamic_config._pprovided_args.append((arg, atom))
                    if pkg.installed and \
                        "selective" not in self._dynamic_config.myparams and \
                        not self._frozen_config.excluded_pkgs.findAtomForPackage(
                        pkg, modified_use=self._pkg_use_enabled(pkg)):
                        self._dynamic_config._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {"myparent" : arg}))
                        # Previous behavior was to bail out in this case, but
                        # since the dep is satisfied by the installed package,
                        # it's more friendly to continue building the graph
                        # and just show a warning message. Therefore, only bail
                        # out here if the atom is not from either the system or
                        # world set.
                        if not (isinstance(arg, SetArg) and \
                            arg.name in ("selected", "system", "world")):
                            return 0, myfavorites

                    # Add the selected package to the graph as soon as possible
                    # so that later dep_check() calls can use it as feedback
                    # for making more consistent atom selections.
                    if not self._add_pkg(pkg, dep):
                        if self.need_restart():
                        elif isinstance(arg, SetArg):
                            writemsg(("\n\n!!! Problem resolving " + \
                                "dependencies for %s from %s\n") % \
                                (atom, arg.arg), noiselevel=-1)
                            writemsg(("\n\n!!! Problem resolving " + \
                                "dependencies for %s\n") % \
                                (atom,), noiselevel=-1)
                        return 0, myfavorites

                except SystemExit as e:
                    raise # Needed else can't exit
                except Exception as e:
                    writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
                    writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))

        # Now that the root packages have been added to the graph,
        # process the dependencies.
        if not self._create_graph():
            return 0, myfavorites

        except self._unknown_internal_error:
            return False, myfavorites

        # Unresolved slot conflicts (without --buildpkgonly-style
        # acceptance) or a pending backtracking request mean failure.
        if (self._dynamic_config._slot_collision_info and
            not self._accept_blocker_conflicts()) or \
            (self._dynamic_config._allow_backtracking and
            "slot conflict" in self._dynamic_config._backtrack_infos):
            return False, myfavorites

        if self._rebuild.trigger_rebuilds():
            backtrack_infos = self._dynamic_config._backtrack_infos
            config = backtrack_infos.setdefault("config", {})
            config["rebuild_list"] = self._rebuild.rebuild_list
            config["reinstall_list"] = self._rebuild.reinstall_list
            self._dynamic_config._need_restart = True
            return False, myfavorites

        if "config" in self._dynamic_config._backtrack_infos and \
            ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
            "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
            self.need_restart():
            return False, myfavorites

        # Any failures except those due to autounmask *alone* should return
        # before this point, since the success_without_autounmask flag that's
        # set below is reserved for cases where there are *zero* other
        # problems. For reference, see backtrack_depgraph, where it skips the
        # get_best_run() call when success_without_autounmask is True.

        digraph_nodes = self._dynamic_config.digraph.nodes

        if any(x in digraph_nodes for x in
            self._dynamic_config._needed_unstable_keywords) or \
            any(x in digraph_nodes for x in
            self._dynamic_config._needed_p_mask_changes) or \
            any(x in digraph_nodes for x in
            self._dynamic_config._needed_use_config_changes) or \
            any(x in digraph_nodes for x in
            self._dynamic_config._needed_license_changes) :
            #We failed if the user needs to change the configuration
            self._dynamic_config._success_without_autounmask = True
            return False, myfavorites

        # We're true here unless we are missing binaries.
        return (True, myfavorites)
    def _set_args(self, args):
        """
        Create the "__non_set_args__" package set from atoms and packages given as
        arguments. This method can be called multiple times if necessary.
        The package selection cache is automatically invalidated, since
        arguments influence package selections.
        """
        # Reset per-root argument state before repopulating it.
        for root in self._dynamic_config.sets:
            depgraph_sets = self._dynamic_config.sets[root]
            depgraph_sets.sets.setdefault('__non_set_args__',
                InternalPackageSet(allow_repo=True)).clear()
            depgraph_sets.atoms.clear()
            depgraph_sets.atom_arg_map.clear()
            set_atoms[root] = []
            non_set_atoms[root] = []

        # We don't add set args to the digraph here since that
        # happens at a later stage and we don't want to make
        # any state changes here that aren't reversed by a
        # another call to this method.
        for arg in self._expand_set_args(args, add_to_digraph=False):
            atom_arg_map = self._dynamic_config.sets[
                arg.root_config.root].atom_arg_map
            if isinstance(arg, SetArg):
                atom_group = set_atoms[arg.root_config.root]
                atom_group = non_set_atoms[arg.root_config.root]

            # Record each atom and map it back to its argument(s).
            for atom in arg.pset.getAtoms():
                atom_group.append(atom)
                atom_key = (atom, arg.root_config.root)
                refs = atom_arg_map.get(atom_key)
                    atom_arg_map[atom_key] = refs

        # Publish the collected atoms into the per-root depgraph sets.
        for root in self._dynamic_config.sets:
            depgraph_sets = self._dynamic_config.sets[root]
            depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
                non_set_atoms.get(root, [])))
            depgraph_sets.sets['__non_set_args__'].update(
                non_set_atoms.get(root, []))

        # Invalidate the package selection cache, since
        # arguments influence package selections.
        self._dynamic_config._highest_pkg_cache.clear()
        for trees in self._dynamic_config._filtered_trees.values():
            trees["porttree"].dbapi._clear_cache()
    def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
        """
        Return a list of slot atoms corresponding to installed slots that
        differ from the slot of the highest visible match. When
        blocker_lookahead is True, slot atoms that would trigger a blocker
        conflict are automatically discarded, potentially allowing automatic
        uninstallation of older slots when appropriate.
        """
        highest_pkg, in_graph = self._select_package(root_config.root, atom)
        if highest_pkg is None:
        vardb = root_config.trees["vartree"].dbapi
        # Collect the SLOTs of installed packages with the same cp.
        for cpv in vardb.match(atom):
            # don't mix new virtuals with old virtuals
            if portage.cpv_getkey(cpv) == highest_pkg.cp:
                slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

        slots.add(highest_pkg.metadata["SLOT"])
        # For each remaining slot, select its best package; keep only
        # those older than the highest visible match.
        slots.remove(highest_pkg.metadata["SLOT"])
            slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
            pkg, in_graph = self._select_package(root_config.root, slot_atom)
            if pkg is not None and \
                pkg.cp == highest_pkg.cp and pkg < highest_pkg:
                greedy_pkgs.append(pkg)
        if not blocker_lookahead:
            return [pkg.slot_atom for pkg in greedy_pkgs]

        # Gather the blocker atoms declared by each candidate (and by
        # highest_pkg) from its dependency strings.
        blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
        for pkg in greedy_pkgs + [highest_pkg]:
            dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
                selected_atoms = self._select_atoms(
                    pkg.root, dep_str, self._pkg_use_enabled(pkg),
                    parent=pkg, strict=True)
            except portage.exception.InvalidDependString:
            for atoms in selected_atoms.values():
                blocker_atoms.extend(x for x in atoms if x.blocker)
            blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

        if highest_pkg not in blockers:

        # filter packages with invalid deps
        greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

        # filter packages that conflict with highest_pkg
        greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
            (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
            blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]

        # If two packages conflict, discard the lower version.
        discard_pkgs = set()
        greedy_pkgs.sort(reverse=True)
        for i in range(len(greedy_pkgs) - 1):
            pkg1 = greedy_pkgs[i]
            if pkg1 in discard_pkgs:
            for j in range(i + 1, len(greedy_pkgs)):
                pkg2 = greedy_pkgs[j]
                if pkg2 in discard_pkgs:
                if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
                    blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
                    # pkg1 sorts higher, so pkg2 is discarded.
                    discard_pkgs.add(pkg2)

        return [pkg.slot_atom for pkg in greedy_pkgs \
            if pkg not in discard_pkgs]
2876 def _select_atoms_from_graph(self, *pargs, **kwargs):
2878 Prefer atoms matching packages that have already been
2879 added to the graph or those that are installed and have
2880 not been scheduled for replacement.
2882 kwargs["trees"] = self._dynamic_config._graph_trees
2883 return self._select_atoms_highest_available(*pargs, **kwargs)
# Resolve a dependency string via portage.dep_check(), selecting the
# highest available matching atoms.  Per-call hooks are temporarily
# installed into the trees mapping (and restored afterwards) so that
# recursive invocations of this method work correctly.
# NOTE(review): this dump has gaps in the embedded original line
# numbering, so some statements of the body are not visible here; the
# comments below describe only the code that is present.
2885 def _select_atoms_highest_available(self, root, depstring,
2886 myuse=None, parent=None, strict=True, trees=None, priority=None):
2887 """This will raise InvalidDependString if necessary. If trees is
2888 None then self._dynamic_config._filtered_trees is used."""
# Parse a raw dependency string into opconvert'ed Atom lists.
2890 if not isinstance(depstring, list):
2892 is_valid_flag = None
2893 if parent is not None:
2894 eapi = parent.metadata['EAPI']
2895 if not parent.installed:
2896 is_valid_flag = parent.iuse.is_valid_flag
2897 depstring = portage.dep.use_reduce(depstring,
2898 uselist=myuse, opconvert=True, token_class=Atom,
2899 is_valid_flag=is_valid_flag, eapi=eapi)
# Optionally discard := (slot operator) deps recorded in built packages.
2901 if (self._dynamic_config.myparams.get(
2902 "ignore_built_slot_operator_deps", "n") == "y" and
2903 parent and parent.built):
2904 ignore_built_slot_operator_deps(depstring)
2906 pkgsettings = self._frozen_config.pkgsettings[root]
2908 trees = self._dynamic_config._filtered_trees
2909 mytrees = trees[root]
2910 atom_graph = digraph()
2912 # Temporarily disable autounmask so that || preferences
2913 # account for masking and USE settings.
2914 _autounmask_backup = self._dynamic_config._autounmask
2915 self._dynamic_config._autounmask = False
2916 # backup state for restoration, in case of recursive
2917 # calls to this method
2918 backup_state = mytrees.copy()
2920 # clear state from previous call, in case this
2921 # call is recursive (we have a backup, that we
2922 # will use to restore it later)
2923 mytrees.pop("pkg_use_enabled", None)
2924 mytrees.pop("parent", None)
2925 mytrees.pop("atom_graph", None)
2926 mytrees.pop("priority", None)
# Install the per-call hooks that dep_check reads out of the trees
# mapping for this root.
2928 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2929 if parent is not None:
2930 mytrees["parent"] = parent
2931 mytrees["atom_graph"] = atom_graph
2932 if priority is not None:
2933 mytrees["priority"] = priority
2935 mycheck = portage.dep_check(depstring, None,
2936 pkgsettings, myuse=myuse,
2937 myroot=root, trees=trees)
# Cleanup: restore autounmask and the saved per-root trees state.
2940 self._dynamic_config._autounmask = _autounmask_backup
2941 mytrees.pop("pkg_use_enabled", None)
2942 mytrees.pop("parent", None)
2943 mytrees.pop("atom_graph", None)
2944 mytrees.pop("priority", None)
2945 mytrees.update(backup_state)
# On dep_check failure, mycheck[1] carries the error message.
2947 raise portage.exception.InvalidDependString(mycheck[1])
2949 selected_atoms = mycheck[1]
2950 elif parent not in atom_graph:
2951 selected_atoms = {parent : mycheck[1]}
2953 # Recursively traversed virtual dependencies, and their
2954 # direct dependencies, are considered to have the same
2955 # depth as direct dependencies.
2956 if parent.depth is None:
2959 virt_depth = parent.depth + 1
# Walk the atom_graph that dep_check populated, grouping the chosen
# child atoms under Dependency keys (one per traversed virtual node).
2960 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2961 selected_atoms = OrderedDict()
2962 node_stack = [(parent, None, None)]
2963 traversed_nodes = set()
2965 node, node_parent, parent_atom = node_stack.pop()
2966 traversed_nodes.add(node)
2970 if node_parent is parent:
2971 if priority is None:
2972 node_priority = None
2974 node_priority = priority.copy()
2976 # virtuals only have runtime deps
2977 node_priority = self._priority(runtime=True)
2979 k = Dependency(atom=parent_atom,
2980 blocker=parent_atom.blocker, child=node,
2981 depth=virt_depth, parent=node_parent,
2982 priority=node_priority, root=node.root)
2985 selected_atoms[k] = child_atoms
2986 for atom_node in atom_graph.child_nodes(node):
2987 child_atom = atom_node[0]
# Identity comparison (id()) is deliberate here: the same textual atom
# can occur in several places, and only the exact chosen instances count.
2988 if id(child_atom) not in chosen_atom_ids:
2990 child_atoms.append(child_atom)
2991 for child_node in atom_graph.child_nodes(atom_node):
2992 if child_node in traversed_nodes:
2994 if not portage.match_from_list(
2995 child_atom, [child_node]):
2996 # Typically this means that the atom
2997 # specifies USE deps that are unsatisfied
2998 # by the selected package. The caller will
2999 # record this as an unsatisfied dependency
3002 node_stack.append((child_node, node, child_atom))
3004 return selected_atoms
# Expand a virtual atom into the real atoms of the matching virtual
# package's RDEPEND, using the already-built graph trees.
# NOTE(review): gaps in the embedded line numbering mean some original
# statements (early returns / yields) are not visible in this dump.
3006 def _expand_virt_from_graph(self, root, atom):
3007 if not isinstance(atom, Atom):
3009 graphdb = self._dynamic_config.mydbapi[root]
3010 match = graphdb.match_pkgs(atom)
# Only old-style virtual/* packages need expansion.
3015 if not pkg.cpv.startswith("virtual/"):
3019 rdepend = self._select_atoms_from_graph(
3020 pkg.root, pkg.metadata.get("RDEPEND", ""),
3021 myuse=self._pkg_use_enabled(pkg),
3022 parent=pkg, strict=False)
3023 except InvalidDependString as e:
3024 writemsg_level("!!! Invalid RDEPEND in " + \
3025 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3026 (pkg.root, pkg.cpv, e),
3027 noiselevel=-1, level=logging.ERROR)
3031 for atoms in rdepend.values():
# Atoms carrying _orig_atom are themselves expanded virtuals.
3033 if hasattr(atom, "_orig_atom"):
3034 # Ignore virtual atoms since we're only
3035 # interested in expanding the real atoms.
# NOTE(review): gaps in the embedded line numbering mean some original
# statements (returns, loop headers) are not visible in this dump.
3039 def _virt_deps_visible(self, pkg, ignore_use=False):
3041 Assumes pkg is a virtual package. Traverses virtual deps recursively
3042 and returns True if all deps are visible, False otherwise. This is
3043 useful for checking if it will be necessary to expand virtual slots,
3044 for cases like bug #382557.
3047 rdepend = self._select_atoms(
3048 pkg.root, pkg.metadata.get("RDEPEND", ""),
3049 myuse=self._pkg_use_enabled(pkg),
3050 parent=pkg, priority=self._priority(runtime=True))
3051 except InvalidDependString as e:
# Installed packages with broken RDEPEND are tolerated silently;
# for others the error is logged.
3052 if not pkg.installed:
3054 writemsg_level("!!! Invalid RDEPEND in " + \
3055 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3056 (pkg.root, pkg.cpv, e),
3057 noiselevel=-1, level=logging.ERROR)
3060 for atoms in rdepend.values():
# When USE is ignored, match visibility without USE constraints.
3063 atom = atom.without_use
# NOTE(review): `pkg` is rebound here to the selected dependency —
# the original parameter is no longer referenced past this point.
3064 pkg, existing = self._select_package(
3066 if pkg is None or not self._pkg_visibility_check(pkg):
# Build a human-readable dependency chain from start_node up to a
# root cause (an argument or a top-level parent), for display in
# error messages.
# NOTE(review): gaps in the embedded line numbering mean a number of
# original statements are not visible in this dump; comments describe
# only the visible code.
3071 def _get_dep_chain(self, start_node, target_atom=None,
3072 unsatisfied_dependency=False):
3074 Returns a list of (atom, node_type) pairs that represent a dep chain.
3075 If target_atom is None, the first package shown is pkg's parent.
3076 If target_atom is not None the first package shown is pkg.
3077 If unsatisfied_dependency is True, the first parent is select who's
3078 dependency is not satisfied by 'pkg'. This is need for USE changes.
3079 (Does not support target_atom.)
3081 traversed_nodes = set()
3085 all_parents = self._dynamic_config._parent_atoms
3086 graph = self._dynamic_config.digraph
# When a target_atom is given, annotate the first package with the
# USE flags that affect that atom.
3088 if target_atom is not None and isinstance(node, Package):
3089 affecting_use = set()
3090 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
3092 affecting_use.update(extract_affecting_use(
3093 node.metadata[dep_str], target_atom,
3094 eapi=node.metadata["EAPI"]))
3095 except InvalidDependString:
3096 if not node.installed:
3098 affecting_use.difference_update(node.use.mask, node.use.force)
3099 pkg_name = _unicode_decode("%s") % (node.cpv,)
3102 for flag in affecting_use:
3103 if flag in self._pkg_use_enabled(node):
3106 usedep.append("-"+flag)
3107 pkg_name += "[%s]" % ",".join(usedep)
3109 dep_chain.append((pkg_name, node.type_name))
3112 # To build a dep chain for the given package we take
3113 # "random" parents form the digraph, except for the
3114 # first package, because we want a parent that forced
3115 # the corresponding change (i.e '>=foo-2', instead 'foo').
3117 traversed_nodes.add(start_node)
3119 start_node_parent_atoms = {}
3120 for ppkg, patom in all_parents.get(node, []):
3121 # Get a list of suitable atoms. For use deps
3122 # (aka unsatisfied_dependency is not None) we
3123 # need that the start_node doesn't match the atom.
3124 if not unsatisfied_dependency or \
3125 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
3126 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
3128 if start_node_parent_atoms:
3129 # If there are parents in all_parents then use one of them.
3130 # If not, then this package got pulled in by an Arg and
3131 # will be correctly handled by the code that handles later
3132 # packages in the dep chain.
3133 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
3136 for ppkg in start_node_parent_atoms[best_match]:
3138 if ppkg in self._dynamic_config._initial_arg_list:
3139 # Stop if reached the top level of the dep chain.
# Main upward traversal: walk from node to a selected parent until
# a root (argument) or a dead end is reached.
3142 while node is not None:
3143 traversed_nodes.add(node)
3145 if node not in graph:
3146 # The parent is not in the graph due to backtracking.
3149 elif isinstance(node, DependencyArg):
3150 if graph.parent_nodes(node):
3153 node_type = "argument"
3154 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
3156 elif node is not start_node:
3157 for ppkg, patom in all_parents[child]:
3159 if child is start_node and unsatisfied_dependency and \
3160 InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
3161 # This atom is satisfied by child, there must be another atom.
3163 atom = patom.unevaluated_atom
# Decide which dep strings (DEPEND/RDEPEND/PDEPEND) contributed the
# edge, based on the edge's recorded priorities.
3167 priorities = graph.nodes[node][0].get(child)
3168 if priorities is None:
3169 # This edge comes from _parent_atoms and was not added to
3170 # the graph, and _parent_atoms does not contain priorities.
3171 dep_strings.add(node.metadata["DEPEND"])
3172 dep_strings.add(node.metadata["RDEPEND"])
3173 dep_strings.add(node.metadata["PDEPEND"])
3175 for priority in priorities:
3176 if priority.buildtime:
3177 dep_strings.add(node.metadata["DEPEND"])
3178 if priority.runtime:
3179 dep_strings.add(node.metadata["RDEPEND"])
3180 if priority.runtime_post:
3181 dep_strings.add(node.metadata["PDEPEND"])
3183 affecting_use = set()
3184 for dep_str in dep_strings:
3186 affecting_use.update(extract_affecting_use(
3187 dep_str, atom, eapi=node.metadata["EAPI"]))
3188 except InvalidDependString:
3189 if not node.installed:
3192 #Don't show flags as 'affecting' if the user can't change them,
3193 affecting_use.difference_update(node.use.mask, \
3196 pkg_name = _unicode_decode("%s") % (node.cpv,)
3199 for flag in affecting_use:
3200 if flag in self._pkg_use_enabled(node):
3203 usedep.append("-"+flag)
3204 pkg_name += "[%s]" % ",".join(usedep)
3206 dep_chain.append((pkg_name, node.type_name))
3208 # When traversing to parents, prefer arguments over packages
3209 # since arguments are root nodes. Never traverse the same
3210 # package twice, in order to prevent an infinite loop.
3212 selected_parent = None
3215 parent_unsatisfied = None
3217 for parent in self._dynamic_config.digraph.parent_nodes(node):
3218 if parent in traversed_nodes:
3220 if isinstance(parent, DependencyArg):
3223 if isinstance(parent, Package) and \
3224 parent.operation == "merge":
3225 parent_merge = parent
3226 if unsatisfied_dependency and node is start_node:
3227 # Make sure that pkg doesn't satisfy parent's dependency.
3228 # This ensures that we select the correct parent for use
3230 for ppkg, atom in all_parents[start_node]:
3232 atom_set = InternalPackageSet(initial_atoms=(atom,))
3233 if not atom_set.findAtomForPackage(start_node):
3234 parent_unsatisfied = parent
3237 selected_parent = parent
# Parent preference order: unsatisfied > merge-list > argument.
3239 if parent_unsatisfied is not None:
3240 selected_parent = parent_unsatisfied
3241 elif parent_merge is not None:
3242 # Prefer parent in the merge list (bug #354747).
3243 selected_parent = parent_merge
3244 elif parent_arg is not None:
3245 if self._dynamic_config.digraph.parent_nodes(parent_arg):
3246 selected_parent = parent_arg
3249 (_unicode_decode("%s") % (parent_arg,), "argument"))
3250 selected_parent = None
3252 node = selected_parent
3255 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
3256 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
3258 for node, node_type in dep_chain:
3259 if node_type == "argument":
3260 display_list.append("required by %s (argument)" % node)
3262 display_list.append("required by %s" % node)
3264 msg = "#" + ", ".join(display_list) + "\n"
# Diagnose and report why an atom could not be satisfied: collects
# masked candidates, missing-USE / missing-IUSE / REQUIRED_USE issues,
# and prints suggestions (including misspelling suggestions).
# NOTE(review): gaps in the embedded line numbering mean many original
# statements are not visible in this dump; comments describe only the
# visible code.
3268 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
3269 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
3271 When check_backtrack=True, no output is produced and
3272 the method either returns or raises _backtrack_mask if
3273 a matching package has been masked by backtracking.
3275 backtrack_mask = False
3276 autounmask_broke_use_dep = False
# Two matchers: one ignoring USE deps, one including them.
3277 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
3279 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
3281 xinfo = '"%s"' % atom.unevaluated_atom
3284 if isinstance(myparent, AtomArg):
3285 xinfo = _unicode_decode('"%s"') % (myparent,)
3286 # Discard null/ from failed cpv_expand category expansion.
3287 xinfo = xinfo.replace("null/", "")
3288 if root != self._frozen_config._running_root.root:
3289 xinfo = "%s for %s" % (xinfo, root)
3290 masked_packages = []
3292 missing_use_adjustable = set()
3293 required_use_unsatisfied = []
3294 masked_pkg_instances = set()
3295 have_eapi_mask = False
3296 pkgsettings = self._frozen_config.pkgsettings[root]
3297 root_config = self._frozen_config.roots[root]
3298 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3299 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3300 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
# Scan every configured db for candidates matching the atom (sans USE)
# and classify each one.
3301 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3302 for db, pkg_type, built, installed, db_keys in dbs:
3305 if hasattr(db, "xmatch"):
3306 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
3308 cpv_list = db.match(atom.without_use)
3310 if atom.repo is None and hasattr(db, "getRepositories"):
3311 repo_list = db.getRepositories()
3313 repo_list = [atom.repo]
3317 for cpv in cpv_list:
3318 for repo in repo_list:
3319 if not db.cpv_exists(cpv, myrepo=repo):
3322 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
3323 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
3324 if metadata is not None and \
3325 portage.eapi_is_supported(metadata["EAPI"]):
3327 repo = metadata.get('repository')
3328 pkg = self._pkg(cpv, pkg_type, root_config,
3329 installed=installed, myrepo=repo)
3330 # pkg.metadata contains calculated USE for ebuilds,
3331 # required later for getMissingLicenses.
3332 metadata = pkg.metadata
3334 # Avoid doing any operations with packages that
3335 # have invalid metadata. It would be unsafe at
3336 # least because it could trigger unhandled
3337 # exceptions in places like check_required_use().
3338 masked_packages.append(
3339 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3341 if not atom_set.findAtomForPackage(pkg,
3342 modified_use=self._pkg_use_enabled(pkg)):
3344 if pkg in self._dynamic_config._runtime_pkg_mask:
3345 backtrack_reasons = \
3346 self._dynamic_config._runtime_pkg_mask[pkg]
3347 mreasons.append('backtracking: %s' % \
3348 ', '.join(sorted(backtrack_reasons)))
3349 backtrack_mask = True
3350 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3351 modified_use=self._pkg_use_enabled(pkg)):
3352 mreasons = ["exclude option"]
3354 masked_pkg_instances.add(pkg)
# Track packages whose only problem is unsatisfied USE deps.
3355 if atom.unevaluated_atom.use:
3357 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3358 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3359 missing_use.append(pkg)
3360 if atom_set_with_use.findAtomForPackage(pkg):
3361 autounmask_broke_use_dep = True
3365 writemsg("violated_conditionals raised " + \
3366 "InvalidAtom: '%s' parent: %s" % \
3367 (atom, myparent), noiselevel=-1)
3369 if not mreasons and \
3371 pkg.metadata.get("REQUIRED_USE") and \
3372 eapi_has_required_use(pkg.metadata["EAPI"]):
3373 if not check_required_use(
3374 pkg.metadata["REQUIRED_USE"],
3375 self._pkg_use_enabled(pkg),
3376 pkg.iuse.is_valid_flag,
3377 eapi=pkg.metadata["EAPI"]):
3378 required_use_unsatisfied.append(pkg)
3380 root_slot = (pkg.root, pkg.slot_atom)
3381 if pkg.built and root_slot in self._rebuild.rebuild_list:
3382 mreasons = ["need to rebuild from source"]
3383 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3384 mreasons = ["need to rebuild from source"]
3385 elif pkg.built and not mreasons:
3386 mreasons = ["use flag configuration mismatch"]
3387 masked_packages.append(
3388 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
# Early-exit probe modes: raise instead of printing.
3392 raise self._backtrack_mask()
3396 if check_autounmask_breakage:
3397 if autounmask_broke_use_dep:
3398 raise self._autounmask_breakage()
# Build per-package suggestions for USE changes / missing IUSE.
3402 missing_use_reasons = []
3403 missing_iuse_reasons = []
3404 for pkg in missing_use:
3405 use = self._pkg_use_enabled(pkg)
3407 #Use the unevaluated atom here, because some flags might have gone
3408 #lost during evaluation.
3409 required_flags = atom.unevaluated_atom.use.required
3410 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3414 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3415 missing_iuse_reasons.append((pkg, mreasons))
3417 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3418 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3420 untouchable_flags = \
3421 frozenset(chain(pkg.use.mask, pkg.use.force))
3422 if any(x in untouchable_flags for x in
3423 chain(need_enable, need_disable)):
3426 missing_use_adjustable.add(pkg)
3427 required_use = pkg.metadata.get("REQUIRED_USE")
3428 required_use_warning = ""
# Warn if the suggested USE change would itself violate REQUIRED_USE.
3430 old_use = self._pkg_use_enabled(pkg)
3431 new_use = set(self._pkg_use_enabled(pkg))
3432 for flag in need_enable:
3434 for flag in need_disable:
3435 new_use.discard(flag)
3436 if check_required_use(required_use, old_use,
3437 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) \
3438 and not check_required_use(required_use, new_use,
3439 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
3440 required_use_warning = ", this change violates use flag constraints " + \
3441 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3443 if need_enable or need_disable:
3445 changes.extend(colorize("red", "+" + x) \
3446 for x in need_enable)
3447 changes.extend(colorize("blue", "-" + x) \
3448 for x in need_disable)
3449 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3450 missing_use_reasons.append((pkg, mreasons))
3452 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3453 # Lets see if the violated use deps are conditional.
3454 # If so, suggest to change them on the parent.
3456 # If the child package is masked then a change to
3457 # parent USE is not a valid solution (a normal mask
3458 # message should be displayed instead).
3459 if pkg in masked_pkg_instances:
3463 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3464 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3465 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3466 #all violated use deps are conditional
3468 conditional = violated_atom.use.conditional
3469 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3470 conditional.enabled, conditional.disabled))
3472 untouchable_flags = \
3473 frozenset(chain(myparent.use.mask, myparent.use.force))
3474 if any(x in untouchable_flags for x in involved_flags):
3477 required_use = myparent.metadata.get("REQUIRED_USE")
3478 required_use_warning = ""
3480 old_use = self._pkg_use_enabled(myparent)
3481 new_use = set(self._pkg_use_enabled(myparent))
3482 for flag in involved_flags:
3484 new_use.discard(flag)
3487 if check_required_use(required_use, old_use,
3488 myparent.iuse.is_valid_flag,
3489 eapi=myparent.metadata["EAPI"]) and \
3490 not check_required_use(required_use, new_use,
3491 myparent.iuse.is_valid_flag,
3492 eapi=myparent.metadata["EAPI"]):
3493 required_use_warning = ", this change violates use flag constraints " + \
3494 "defined by %s: '%s'" % (myparent.cpv, \
3495 human_readable_required_use(required_use))
3497 for flag in involved_flags:
3498 if flag in self._pkg_use_enabled(myparent):
3499 changes.append(colorize("blue", "-" + flag))
3501 changes.append(colorize("red", "+" + flag))
3502 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3503 if (myparent, mreasons) not in missing_use_reasons:
3504 missing_use_reasons.append((myparent, mreasons))
# Filter suggestions down to unmasked candidates only.
3506 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3507 in missing_use_reasons if pkg not in masked_pkg_instances]
3509 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3510 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3512 show_missing_use = False
3513 if unmasked_use_reasons:
3514 # Only show the latest version.
3515 show_missing_use = []
3517 parent_reason = None
3518 for pkg, mreasons in unmasked_use_reasons:
3520 if parent_reason is None:
3521 #This happens if a use change on the parent
3522 #leads to a satisfied conditional use dep.
3523 parent_reason = (pkg, mreasons)
3524 elif pkg_reason is None:
3525 #Don't rely on the first pkg in unmasked_use_reasons,
3526 #being the highest version of the dependency.
3527 pkg_reason = (pkg, mreasons)
3529 show_missing_use.append(pkg_reason)
3531 show_missing_use.append(parent_reason)
3533 elif unmasked_iuse_reasons:
3534 masked_with_iuse = False
3535 for pkg in masked_pkg_instances:
3536 #Use atom.unevaluated here, because some flags might have gone
3537 #lost during evaluation.
3538 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3539 # Package(s) with required IUSE are masked,
3540 # so display a normal masking message.
3541 masked_with_iuse = True
3543 if not masked_with_iuse:
3544 show_missing_use = unmasked_iuse_reasons
3546 if required_use_unsatisfied:
3547 # If there's a higher unmasked version in missing_use_adjustable
3548 # then we want to show that instead.
3549 for pkg in missing_use_adjustable:
3550 if pkg not in masked_pkg_instances and \
3551 pkg > required_use_unsatisfied[0]:
3552 required_use_unsatisfied = False
3557 if show_req_use is None and required_use_unsatisfied:
3558 # We have an unmasked package that only requires USE adjustment
3559 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3560 # that the user wants the latest version, so only the first
3561 # instance is displayed.
3562 show_req_use = required_use_unsatisfied[0]
# Output phase: REQUIRED_USE failure, missing USE, masked packages,
# or 'no ebuilds' with misspelling suggestions — in that order.
3564 if show_req_use is not None:
3567 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3568 writemsg("\n!!! " + \
3569 colorize("BAD", "The ebuild selected to satisfy ") + \
3570 colorize("INFORM", xinfo) + \
3571 colorize("BAD", " has unmet requirements.") + "\n",
3573 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3574 writemsg("- %s %s\n" % (output_cpv, use_display),
3576 writemsg("\n The following REQUIRED_USE flag constraints " + \
3577 "are unsatisfied:\n", noiselevel=-1)
3578 reduced_noise = check_required_use(
3579 pkg.metadata["REQUIRED_USE"],
3580 self._pkg_use_enabled(pkg),
3581 pkg.iuse.is_valid_flag,
3582 eapi=pkg.metadata["EAPI"]).tounicode()
3583 writemsg(" %s\n" % \
3584 human_readable_required_use(reduced_noise),
3586 normalized_required_use = \
3587 " ".join(pkg.metadata["REQUIRED_USE"].split())
3588 if reduced_noise != normalized_required_use:
3589 writemsg("\n The above constraints " + \
3590 "are a subset of the following complete expression:\n",
3592 writemsg(" %s\n" % \
3593 human_readable_required_use(normalized_required_use),
3595 writemsg("\n", noiselevel=-1)
3597 elif show_missing_use:
3598 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3599 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3600 for pkg, mreasons in show_missing_use:
3601 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3603 elif masked_packages:
3604 writemsg("\n!!! " + \
3605 colorize("BAD", "All ebuilds that could satisfy ") + \
3606 colorize("INFORM", xinfo) + \
3607 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3608 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3609 have_eapi_mask = show_masked_packages(masked_packages)
3611 writemsg("\n", noiselevel=-1)
3612 msg = ("The current version of portage supports " + \
3613 "EAPI '%s'. You must upgrade to a newer version" + \
3614 " of portage before EAPI masked packages can" + \
3615 " be installed.") % portage.const.EAPI
3616 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3617 writemsg("\n", noiselevel=-1)
3621 if not atom.cp.startswith("null/"):
3622 for pkg in self._iter_match_pkgs_any(
3623 root_config, Atom(atom.cp)):
3627 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
# Misspelling suggestions via difflib, comparing lowercased cp names.
3628 if isinstance(myparent, AtomArg) and \
3630 self._frozen_config.myopts.get(
3631 "--misspell-suggestions", "y") != "n":
3632 cp = myparent.atom.cp.lower()
3633 cat, pkg = portage.catsplit(cp)
3637 writemsg("\nemerge: searching for similar names..."
3641 all_cp.update(vardb.cp_all())
3642 if "--usepkgonly" not in self._frozen_config.myopts:
3643 all_cp.update(portdb.cp_all())
3644 if "--usepkg" in self._frozen_config.myopts:
3645 all_cp.update(bindb.cp_all())
3646 # discard dir containing no ebuilds
3650 for cp_orig in all_cp:
3651 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3652 all_cp = set(orig_cp_map)
3655 matches = difflib.get_close_matches(cp, all_cp)
3658 for other_cp in list(all_cp):
3659 other_pkg = portage.catsplit(other_cp)[1]
3660 if other_pkg == pkg:
3661 # Check for non-identical package that
3662 # differs only by upper/lower case.
3664 for cp_orig in orig_cp_map[other_cp]:
3665 if portage.catsplit(cp_orig)[1] != \
3666 portage.catsplit(atom.cp)[1]:
3670 # discard dir containing no ebuilds
3671 all_cp.discard(other_cp)
3673 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3674 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3676 for pkg_match in pkg_matches:
3677 matches.extend(pkg_to_cp[pkg_match])
3679 matches_orig_case = []
3681 matches_orig_case.extend(orig_cp_map[cp])
3682 matches = matches_orig_case
3684 if len(matches) == 1:
3685 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
3687 elif len(matches) > 1:
3689 "\nemerge: Maybe you meant any of these: %s?\n" % \
3690 (", ".join(matches),), noiselevel=-1)
3692 # Generally, this would only happen if
3693 # all dbapis are empty.
3694 writemsg(" nothing similar found.\n"
3697 if not isinstance(myparent, AtomArg):
3698 # It's redundant to show parent for AtomArg since
3699 # it's the same as 'xinfo' displayed above.
3700 dep_chain = self._get_dep_chain(myparent, atom)
3701 for node, node_type in dep_chain:
3702 msg.append('(dependency required by "%s" [%s])' % \
3703 (colorize('INFORM', _unicode_decode("%s") % \
3704 (node)), node_type))
3707 writemsg("\n".join(msg), noiselevel=-1)
3708 writemsg("\n", noiselevel=-1)
3712 writemsg("\n", noiselevel=-1)
3714 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
3715 for db, pkg_type, built, installed, db_keys in \
3716 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3717 for pkg in self._iter_match_pkgs(root_config,
3718 pkg_type, atom, onlydeps=onlydeps):
# NOTE(review): gaps in the embedded line numbering mean some original
# statements (loop headers, continue/yield/return) are not visible in
# this dump; comments describe only the visible code.
3721 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
3723 Iterate over Package instances of pkg_type matching the given atom.
3724 This does not check visibility and it also does not match USE for
3725 unbuilt ebuilds since USE are lazily calculated after visibility
3726 checks (to avoid the expense when possible).
3729 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3730 atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
3731 cp_list = db.cp_list(atom_exp.cp)
3732 matched_something = False
3733 installed = pkg_type == 'installed'
3736 atom_set = InternalPackageSet(initial_atoms=(atom,),
3738 if atom.repo is None and hasattr(db, "getRepositories"):
3739 repo_list = db.getRepositories()
3741 repo_list = [atom.repo]
3746 # Call match_from_list on one cpv at a time, in order
3747 # to avoid unnecessary match_from_list comparisons on
3748 # versions that are never yielded from this method.
3749 if not match_from_list(atom_exp, [cpv]):
3751 for repo in repo_list:
3754 pkg = self._pkg(cpv, pkg_type, root_config,
3755 installed=installed, onlydeps=onlydeps, myrepo=repo)
3756 except portage.exception.PackageNotFound:
3759 # A cpv can be returned from dbapi.match() as an
3760 # old-style virtual match even in cases when the
3761 # package does not actually PROVIDE the virtual.
3762 # Filter out any such false matches here.
3764 # Make sure that cpv from the current repo satisfies the atom.
3765 # This might not be the case if there are several repos with
3766 # the same cpv, but different metadata keys, like SLOT.
3767 # Also, parts of the match that require metadata access
3768 # are deferred until we have cached the metadata in a
3770 if not atom_set.findAtomForPackage(pkg,
3771 modified_use=self._pkg_use_enabled(pkg)):
3773 matched_something = True
3776 # USE=multislot can make an installed package appear as if
3777 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3778 # won't do any good as long as USE=multislot is enabled since
3779 # the newly built package still won't have the expected slot.
3780 # Therefore, assume that such SLOT dependencies are already
3781 # satisfied rather than forcing a rebuild.
3782 if not matched_something and installed and atom.slot is not None:
3784 if "remove" in self._dynamic_config.myparams:
3785 # We need to search the portdbapi, which is not in our
3786 # normal dbs list, in order to find the real SLOT.
3787 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
3788 db_keys = list(portdb._aux_cache_keys)
3789 dbs = [(portdb, "ebuild", False, False, db_keys)]
3791 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
3793 cp_list = db.cp_list(atom_exp.cp)
# Fallback matching ignores the slot part of the atom.
3795 atom_set = InternalPackageSet(
3796 initial_atoms=(atom.without_slot,), allow_repo=True)
3797 atom_exp_without_slot = atom_exp.without_slot
3800 if not match_from_list(atom_exp_without_slot, [cpv]):
# Only apply the multislot workaround when no db provides the
# requested slot for this cpv.
3802 slot_available = False
3803 for other_db, other_type, other_built, \
3804 other_installed, other_keys in dbs:
3807 other_db.aux_get(cpv, ["SLOT"])[0]:
3808 slot_available = True
3812 if not slot_available:
3814 inst_pkg = self._pkg(cpv, "installed",
3815 root_config, installed=installed, myrepo=atom.repo)
3816 # Remove the slot from the atom and verify that
3817 # the package matches the resulting atom.
3818 if atom_set.findAtomForPackage(inst_pkg):
3822 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3823 cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
3824 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
3827 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3828 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3831 if self._pkg_visibility_check(pkg) and \
3832 not (pkg.installed and pkg.masks):
3833 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# NOTE(review): gaps in the embedded line numbering mean the return
# statements and try/except structure of this method are not fully
# visible in this dump.
3836 def _want_installed_pkg(self, pkg):
3838 Given an installed package returned from select_pkg, return
3839 True if the user has not explicitly requested for this package
3840 to be replaced (typically via an atom on the command line).
# Excluded packages are checked first against the --exclude set.
3842 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
3843 modified_use=self._pkg_use_enabled(pkg)):
# Scan matching arguments for an explicit reinstall request.
3848 for arg, atom in self._iter_atoms_for_pkg(pkg):
3849 if arg.force_reinstall:
3851 except InvalidDependString:
# In selective mode, keeping the installed package is preferred.
3854 if "selective" in self._dynamic_config.myparams:
# Check whether an ebuild equivalent to pkg (same cpv) is visible,
# used to decide whether a binary/installed package has a usable
# ebuild counterpart.
# NOTE(review): gaps in the embedded line numbering mean the try/else
# structure and return statements are not fully visible in this dump.
3859 def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
3862 pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
3863 except portage.exception.PackageNotFound:
# The exact repo match was not found; fall back to scanning all
# repos for any visible ebuild with the same cpv.
3864 pkg_eb_visible = False
3865 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3866 "ebuild", Atom("=%s" % (pkg.cpv,))):
3867 if self._pkg_visibility_check(pkg_eb, autounmask_level):
3868 pkg_eb_visible = True
3870 if not pkg_eb_visible:
3873 if not self._pkg_visibility_check(pkg_eb, autounmask_level):
3878 def _equiv_binary_installed(self, pkg):
3879 build_time = pkg.metadata.get('BUILD_TIME')
3884 inst_pkg = self._pkg(pkg.cpv, "installed",
3885 pkg.root_config, installed=True)
3886 except PackageNotFound:
3889 return build_time == inst_pkg.metadata.get('BUILD_TIME')
3891 class _AutounmaskLevel(object):
3892 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
3893 "allow_missing_keywords", "allow_unmasks")
3896 self.allow_use_changes = False
3897 self.allow_license_changes = False
3898 self.allow_unstable_keywords = False
3899 self.allow_missing_keywords = False
3900 self.allow_unmasks = False
# Generator yielding successively more invasive _AutounmaskLevel
# instances.  Note that the SAME level object is mutated and re-yielded
# each time, so consumers must not hold references across iterations.
# NOTE(review): gaps in the embedded line numbering mean some original
# statements (returns, break) are not visible in this dump.
3902 def _autounmask_levels(self):
3904 Iterate over the different allowed things to unmask.
3908 2. USE + ~arch + license
3909 3. USE + ~arch + license + missing keywords
3910 4. USE + ~arch + license + masks
3911 5. USE + ~arch + license + missing keywords + masks
3914 * Do least invasive changes first.
3915 * Try unmasking alone before unmasking + missing keywords
3916 to avoid -9999 versions if possible
# Only active when autounmask is fully enabled.
3919 if self._dynamic_config._autounmask is not True:
3922 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
3923 autounmask_level = self._AutounmaskLevel()
3925 autounmask_level.allow_use_changes = True
3926 yield autounmask_level
3928 autounmask_level.allow_license_changes = True
3929 yield autounmask_level
3931 for only_use_changes in (False,):
3933 autounmask_level.allow_unstable_keywords = (not only_use_changes)
3934 autounmask_level.allow_license_changes = (not only_use_changes)
# Escalate through keyword/mask combinations in increasing severity.
3936 for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
3938 if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
3941 autounmask_level.allow_missing_keywords = missing_keyword
3942 autounmask_level.allow_unmasks = unmask
3944 yield autounmask_level
3947 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Select the highest available package for atom, first without any
# autounmask changes; if that fails (or the result is unwanted), retry
# through the escalating _autounmask_levels. Returns (pkg, existing_node).
3948 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the plain selection so it can be restored if autounmask
# escalation does not produce a usable alternative.
3950 default_selection = (pkg, existing)
3953 if pkg is not None and \
3955 not self._want_installed_pkg(pkg):
3958 if self._dynamic_config._autounmask is True:
3961 for autounmask_level in self._autounmask_levels():
3966 self._wrapped_select_pkg_highest_available_imp(
3967 root, atom, onlydeps=onlydeps,
3968 autounmask_level=autounmask_level)
# A restart request means backtracking will re-resolve; stop escalating.
3972 if self._dynamic_config._need_restart:
3976 # This ensures that we can fall back to an installed package
3977 # that may have been rejected in the autounmask path above.
3978 return default_selection
3980 return pkg, existing
3982 def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
# Return whether pkg may be used, optionally recording autounmask
# changes (keyword/mask/license) permitted by autounmask_level. Also
# registers any such changes in _backtrack_infos so backtracking can
# replay them.
# NOTE(review): gapped extraction — several early `return` lines (e.g.
# after line 3992 and the "visible" fast paths) are missing.
3987 if trust_graph and pkg in self._dynamic_config.digraph:
3988 # Sometimes we need to temporarily disable
3989 # dynamic_config._autounmask, but for overall
3990 # consistency in dependency resolution, in most
3991 # cases we want to treat packages in the graph
3992 # as though they are visible.
# Without autounmask (or a level to apply), a masked package is simply
# not visible.
3995 if not self._dynamic_config._autounmask or autounmask_level is None:
3998 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3999 root_config = self._frozen_config.roots[pkg.root]
4000 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each masking reason into the categories autounmask can fix;
# anything else makes the package permanently invisible here.
4002 masked_by_unstable_keywords = False
4003 masked_by_missing_keywords = False
4004 missing_licenses = None
4005 masked_by_something_else = False
4006 masked_by_p_mask = False
4008 for reason in mreasons:
4009 hint = reason.unmask_hint
4012 masked_by_something_else = True
4013 elif hint.key == "unstable keyword":
4014 masked_by_unstable_keywords = True
# A "**" keyword hint means the package has no keyword at all.
4015 if hint.value == "**":
4016 masked_by_missing_keywords = True
4017 elif hint.key == "p_mask":
4018 masked_by_p_mask = True
4019 elif hint.key == "license":
4020 missing_licenses = hint.value
4022 masked_by_something_else = True
4024 if masked_by_something_else:
# Drop masks that previous autounmask passes already agreed to lift.
4027 if pkg in self._dynamic_config._needed_unstable_keywords:
4028 #If the package is already keyworded, remove the mask.
4029 masked_by_unstable_keywords = False
4030 masked_by_missing_keywords = False
4032 if pkg in self._dynamic_config._needed_p_mask_changes:
4033 #If the package is already keyworded, remove the mask.
4034 masked_by_p_mask = False
4036 if missing_licenses:
4037 #If the needed licenses are already unmasked, remove the mask.
4038 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
4040 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
4041 #Package has already been unmasked.
# Reject if any remaining mask category is not permitted by the
# current autounmask level.
4044 if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
4045 (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
4046 (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
4047 (missing_licenses and not autounmask_level.allow_license_changes):
4048 #We are not allowed to do the needed changes.
# Record each accepted change both in the dynamic config (for this
# resolution pass) and in _backtrack_infos (for replay after restart).
4051 if masked_by_unstable_keywords:
4052 self._dynamic_config._needed_unstable_keywords.add(pkg)
4053 backtrack_infos = self._dynamic_config._backtrack_infos
4054 backtrack_infos.setdefault("config", {})
4055 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
4056 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
4058 if masked_by_p_mask:
4059 self._dynamic_config._needed_p_mask_changes.add(pkg)
4060 backtrack_infos = self._dynamic_config._backtrack_infos
4061 backtrack_infos.setdefault("config", {})
4062 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
4063 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
4065 if missing_licenses:
4066 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
4067 backtrack_infos = self._dynamic_config._backtrack_infos
4068 backtrack_infos.setdefault("config", {})
4069 backtrack_infos["config"].setdefault("needed_license_changes", set())
# License entries are (pkg, frozenset-of-licenses) pairs, unlike the
# plain pkg entries used for the keyword/mask sets above.
4070 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
4074 def _pkg_use_enabled(self, pkg, target_use=None):
4076 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
4077 If target_use is given, the need changes are computed to make the package useable.
4078 Example: target_use = { "foo": True, "bar": False }
4079 The flags target_use must be in the pkg's IUSE.
# Fast path: without autounmask (guard line missing from this gapped
# extraction) and without pending changes, the pkg's own USE stands.
4082 return pkg.use.enabled
4083 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
4085 if target_use is None:
4086 if needed_use_config_change is None:
4087 return pkg.use.enabled
# Entry is a (new_use_frozenset, changes_dict) pair; [0] is the
# effective USE set.
4089 return needed_use_config_change[0]
# target_use given: merge the requested flag states on top of any
# previously computed change set.
4091 if needed_use_config_change is not None:
4092 old_use = needed_use_config_change[0]
4094 old_changes = needed_use_config_change[1]
4095 new_changes = old_changes.copy()
4097 old_use = pkg.use.enabled
# For each requested flag, record a change only when the current state
# disagrees; a conflicting prior change aborts (lines with the early
# return are missing here).
4102 for flag, state in target_use.items():
4104 if flag not in old_use:
4105 if new_changes.get(flag) == False:
4107 new_changes[flag] = True
4111 if new_changes.get(flag) == True:
4113 new_changes[flag] = False
4114 new_use.update(old_use.difference(target_use))
# Restart is only needed when the USE change alters this pkg's deps or
# an installed parent's USE-conditional dep on it.
4116 def want_restart_for_use_change(pkg, new_use):
4117 if pkg not in self._dynamic_config.digraph.nodes:
# Compare dep strings evaluated under old vs new USE; any difference
# can change graph structure.
4120 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
4121 dep = pkg.metadata[key]
4122 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4123 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4125 if old_val != new_val:
4128 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
4129 if not parent_atoms:
4132 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
4133 for ppkg, atom in parent_atoms:
# Only parents whose atoms reference a changed flag matter.
4134 if not atom.use or \
4135 not any(x in atom.use.required for x in changes):
4142 if new_changes != old_changes:
4143 #Don't do the change if it violates REQUIRED_USE.
4144 required_use = pkg.metadata.get("REQUIRED_USE")
4145 if required_use and check_required_use(required_use, old_use,
4146 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) and \
4147 not check_required_use(required_use, new_use,
4148 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
# Never propose toggling masked or forced flags — the user can't
# actually change those.
4151 if any(x in pkg.use.mask for x in new_changes) or \
4152 any(x in pkg.use.force for x in new_changes):
4155 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
4156 backtrack_infos = self._dynamic_config._backtrack_infos
4157 backtrack_infos.setdefault("config", {})
4158 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
4159 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
4160 if want_restart_for_use_change(pkg, new_use):
4161 self._dynamic_config._need_restart = True
4164 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
# Core package-selection routine: scan all configured dbs (ebuild,
# binary, installed) for packages matching atom, apply visibility, USE,
# exclusion, and reinstall policies, then pick the best candidate.
# Returns (selected_pkg_or_None, existing_graph_node_or_None).
# NOTE(review): gapped extraction — many lines (try/except bodies,
# continue/break/return statements, some assignments such as atom_cp,
# myeb, cpv, matched_oldpkg initialization) are missing; comments below
# describe only what the visible lines establish.
4165 root_config = self._frozen_config.roots[root]
4166 pkgsettings = self._frozen_config.pkgsettings[root]
4167 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
4168 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
4169 # List of acceptable packages, ordered by type preference.
4170 matched_packages = []
4171 matched_pkgs_ignore_use = []
4172 highest_version = None
4173 if not isinstance(atom, portage.dep.Atom):
4174 atom = portage.dep.Atom(atom)
# have_new_virt: a new-style virtual package exists for this cp, so
# old-style virtual providers should be skipped below.
4176 have_new_virt = atom_cp.startswith("virtual/") and \
4177 self._have_new_virt(root, atom_cp)
4178 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
4179 existing_node = None
# Cache the relevant option/param flags once before the scan loops.
4181 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
4182 usepkg = "--usepkg" in self._frozen_config.myopts
4183 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
4184 empty = "empty" in self._dynamic_config.myparams
4185 selective = "selective" in self._dynamic_config.myparams
4187 avoid_update = "--update" not in self._frozen_config.myopts
4188 dont_miss_updates = "--update" in self._frozen_config.myopts
4189 use_ebuild_visibility = self._frozen_config.myopts.get(
4190 '--use-ebuild-visibility', 'n') != 'n'
4191 reinstall_atoms = self._frozen_config.reinstall_atoms
4192 usepkg_exclude = self._frozen_config.usepkg_exclude
4193 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
4195 # Behavior of the "selective" parameter depends on
4196 # whether or not a package matches an argument atom.
4197 # If an installed package provides an old-style
4198 # virtual that is no longer provided by an available
4199 # package, the installed package may match an argument
4200 # atom even though none of the available packages do.
4201 # Therefore, "selective" logic does not consider
4202 # whether or not an installed package matches an
4203 # argument atom. It only considers whether or not
4204 # available packages match argument atoms, which is
4205 # represented by the found_available_arg flag.
4206 found_available_arg = False
4207 packages_with_invalid_use_config = []
# Two passes: first try to reuse an existing graph node, then do a
# regular selection.
4208 for find_existing_node in True, False:
4211 for db, pkg_type, built, installed, db_keys in dbs:
# Skip the installed db when a reinstall is wanted and an
# alternative has already matched.
4214 if installed and not find_existing_node:
4215 want_reinstall = reinstall or empty or \
4216 (found_available_arg and not selective)
4217 if want_reinstall and matched_packages:
4220 # Ignore USE deps for the initial match since we want to
4221 # ensure that updates aren't missed solely due to the user's
4222 # USE configuration.
4223 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
4225 if pkg.cp != atom_cp and have_new_virt:
4226 # pull in a new-style virtual instead
4228 if pkg in self._dynamic_config._runtime_pkg_mask:
4229 # The package has been masked by the backtracking logic
4231 root_slot = (pkg.root, pkg.slot_atom)
# Honor scheduled rebuilds/reinstalls from the _rebuild logic.
4232 if pkg.built and root_slot in self._rebuild.rebuild_list:
4234 if (pkg.installed and
4235 root_slot in self._rebuild.reinstall_list):
# --exclude and --usepkg-exclude filtering (installed pkgs are
# exempt from --exclude).
4238 if not pkg.installed and \
4239 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
4240 modified_use=self._pkg_use_enabled(pkg)):
4243 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
4244 modified_use=self._pkg_use_enabled(pkg)):
4247 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
4248 modified_use=self._pkg_use_enabled(pkg))
4250 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
4251 (not pkg.installed or dont_miss_updates):
4252 # Check if a higher version was rejected due to user
4253 # USE configuration. The packages_with_invalid_use_config
4254 # list only contains unbuilt ebuilds since USE can't
4255 # be changed for built packages.
4256 higher_version_rejected = False
4257 repo_priority = pkg.repo_priority
4258 for rejected in packages_with_invalid_use_config:
4259 if rejected.cp != pkg.cp:
4262 higher_version_rejected = True
4264 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
4265 # If version is identical then compare
4266 # repo priority (see bug #350254).
4267 rej_repo_priority = rejected.repo_priority
4268 if rej_repo_priority is not None and \
4269 (repo_priority is None or
4270 rej_repo_priority > repo_priority):
4271 higher_version_rejected = True
4273 if higher_version_rejected:
4277 reinstall_for_flags = None
4279 if not pkg.installed or \
4280 (matched_packages and not avoid_update):
4281 # Only enforce visibility on installed packages
4282 # if there is at least one other visible package
4283 # available. By filtering installed masked packages
4284 # here, packages that have been masked since they
4285 # were installed can be automatically downgraded
4286 # to an unmasked version. NOTE: This code needs to
4287 # be consistent with masking behavior inside
4288 # _dep_check_composite_db, in order to prevent
4289 # incorrect choices in || deps like bug #351828.
4291 if not self._pkg_visibility_check(pkg, autounmask_level):
4294 # Enable upgrade or downgrade to a version
4295 # with visible KEYWORDS when the installed
4296 # version is masked by KEYWORDS, but never
4297 # reinstall the same exact version only due
4298 # to a KEYWORDS mask. See bug #252167.
4300 if pkg.type_name != "ebuild" and matched_packages:
4301 # Don't re-install a binary package that is
4302 # identical to the currently installed package
4303 # (see bug #354441).
4304 identical_binary = False
4305 if usepkg and pkg.installed:
4306 for selected_pkg in matched_packages:
4307 if selected_pkg.type_name == "binary" and \
4308 selected_pkg.cpv == pkg.cpv and \
4309 selected_pkg.metadata.get('BUILD_TIME') == \
4310 pkg.metadata.get('BUILD_TIME'):
4311 identical_binary = True
4314 if not identical_binary:
4315 # If the ebuild no longer exists or it's
4316 # keywords have been dropped, reject built
4317 # instances (installed or binary).
4318 # If --usepkgonly is enabled, assume that
4319 # the ebuild status should be ignored.
4320 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
4321 if pkg.installed and pkg.masks:
4323 elif not self._equiv_ebuild_visible(pkg,
4324 autounmask_level=autounmask_level):
4327 # Calculation of USE for unbuilt ebuilds is relatively
4328 # expensive, so it is only performed lazily, after the
4329 # above visibility checks are complete.
# Determine whether this pkg matches an argument atom (myarg) and
# record availability for the "selective" logic above.
4333 for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
4334 if myarg.force_reinstall:
4337 except InvalidDependString:
4339 # masked by corruption
4341 if not installed and myarg:
4342 found_available_arg = True
# USE-dep handling: first make sure all required IUSE flags exist,
# then (if allowed) compute autounmask USE changes to satisfy the atom.
4344 if atom.unevaluated_atom.use:
4345 #Make sure we don't miss a 'missing IUSE'.
4346 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
4347 # Don't add this to packages_with_invalid_use_config
4348 # since IUSE cannot be adjusted by the user.
4353 matched_pkgs_ignore_use.append(pkg)
4354 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
4356 for flag in atom.use.enabled:
4357 target_use[flag] = True
4358 for flag in atom.use.disabled:
4359 target_use[flag] = False
4360 use = self._pkg_use_enabled(pkg, target_use)
4362 use = self._pkg_use_enabled(pkg)
# Decide whether the user could adjust USE to satisfy the atom;
# masked/forced flags and missing-IUSE defaults rule it out.
4365 can_adjust_use = not pkg.built
4366 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
4367 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
4369 if atom.use.enabled:
4370 if any(x in atom.use.enabled for x in missing_disabled):
4372 can_adjust_use = False
4373 need_enabled = atom.use.enabled.difference(use)
4375 need_enabled = need_enabled.difference(missing_enabled)
4379 if any(x in pkg.use.mask for x in need_enabled):
4380 can_adjust_use = False
4382 if atom.use.disabled:
4383 if any(x in atom.use.disabled for x in missing_enabled):
4385 can_adjust_use = False
4386 need_disabled = atom.use.disabled.intersection(use)
4388 need_disabled = need_disabled.difference(missing_disabled)
4392 if any(x in pkg.use.force and x not in
4393 pkg.use.mask for x in need_disabled):
4394 can_adjust_use = False
4398 # Above we must ensure that this package has
4399 # absolutely no use.force, use.mask, or IUSE
4400 # issues that the user typically can't make
4401 # adjustments to solve (see bug #345979).
4402 # FIXME: Conditional USE deps complicate
4403 # issues. This code currently excludes cases
4404 # in which the user can adjust the parent
4405 # package's USE in order to satisfy the dep.
4406 packages_with_invalid_use_config.append(pkg)
# Track the highest version seen for the atom's own cp, used for
# slot-aware comparisons against existing graph nodes below.
4409 if pkg.cp == atom_cp:
4410 if highest_version is None:
4411 highest_version = pkg
4412 elif pkg > highest_version:
4413 highest_version = pkg
4414 # At this point, we've found the highest visible
4415 # match from the current repo. Any lower versions
4416 # from this repo are ignored, so this so the loop
4417 # will always end with a break statement below
4419 if find_existing_node:
# Pass 1: prefer a node already in the graph for this slot,
# unless a higher version exists in a different slot.
4420 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4424 # Use PackageSet.findAtomForPackage()
4425 # for PROVIDE support.
4426 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
4427 if highest_version and \
4428 e_pkg.cp == atom_cp and \
4429 e_pkg < highest_version and \
4430 e_pkg.slot_atom != highest_version.slot_atom:
4431 # There is a higher version available in a
4432 # different slot, so this existing node is
4436 matched_packages.append(e_pkg)
4437 existing_node = e_pkg
4439 # Compare built package to current config and
4440 # reject the built package if necessary.
4441 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4442 ("--newuse" in self._frozen_config.myopts or \
4443 "--reinstall" in self._frozen_config.myopts or \
4444 (not installed and self._dynamic_config.myparams.get(
4445 "binpkg_respect_use") in ("y", "auto"))):
4446 iuses = pkg.iuse.all
4447 old_use = self._pkg_use_enabled(pkg)
# myeb (the corresponding ebuild, assignment not visible in this
# extraction) provides the current-config USE reference.
4449 pkgsettings.setcpv(myeb)
4451 pkgsettings.setcpv(pkg)
4452 now_use = pkgsettings["PORTAGE_USE"].split()
4453 forced_flags = set()
4454 forced_flags.update(pkgsettings.useforce)
4455 forced_flags.update(pkgsettings.usemask)
4457 if myeb and not usepkgonly and not useoldpkg:
4458 cur_iuse = myeb.iuse.all
4459 reinstall_for_flags = self._reinstall_for_flags(pkg,
4460 forced_flags, old_use, iuses, now_use, cur_iuse)
4461 if reinstall_for_flags:
4462 if not pkg.installed:
4463 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4465 # Compare current config to installed package
4466 # and do not reinstall if possible.
4467 if not installed and not useoldpkg and \
4468 ("--newuse" in self._frozen_config.myopts or \
4469 "--reinstall" in self._frozen_config.myopts) and \
4470 cpv in vardb.match(atom):
4471 forced_flags = set()
4472 forced_flags.update(pkg.use.force)
4473 forced_flags.update(pkg.use.mask)
4474 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4475 old_use = inst_pkg.use.enabled
4476 old_iuse = inst_pkg.iuse.all
4477 cur_use = self._pkg_use_enabled(pkg)
4478 cur_iuse = pkg.iuse.all
4479 reinstall_for_flags = \
4480 self._reinstall_for_flags(pkg,
4481 forced_flags, old_use, old_iuse,
4483 if reinstall_for_flags:
4485 if reinstall_atoms.findAtomForPackage(pkg, \
4486 modified_use=self._pkg_use_enabled(pkg)):
# Accept the candidate; matched_oldpkg collects --useoldpkg matches.
4491 matched_oldpkg.append(pkg)
4492 matched_packages.append(pkg)
4493 if reinstall_for_flags:
4494 self._dynamic_config._reinstall_nodes[pkg] = \
4498 if not matched_packages:
4501 if "--debug" in self._frozen_config.myopts:
4502 for pkg in matched_packages:
4503 portage.writemsg("%s %s%s%s\n" % \
4504 ((pkg.type_name + ":").rjust(10),
4505 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4507 # Filter out any old-style virtual matches if they are
4508 # mixed with new-style virtual matches.
4510 if len(matched_packages) > 1 and \
4511 "virtual" == portage.catsplit(cp)[0]:
4512 for pkg in matched_packages:
4515 # Got a new-style virtual, so filter
4516 # out any old-style virtuals.
4517 matched_packages = [pkg for pkg in matched_packages \
# Reusing the existing graph node wins outright when it survived all
# of the filters above.
4521 if existing_node is not None and \
4522 existing_node in matched_packages:
4523 return existing_node, existing_node
4525 if len(matched_packages) > 1:
# rebuilt_binaries mode: prefer a binary whose BUILD_TIME differs
# from (or is newer than, with --rebuilt-binaries-timestamp) the
# installed copy, to closely track a binhost.
4526 if rebuilt_binaries:
4530 for pkg in matched_packages:
4536 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4538 if built_pkg is not None and inst_pkg is not None:
4539 # Only reinstall if binary package BUILD_TIME is
4540 # non-empty, in order to avoid cases like to
4541 # bug #306659 where BUILD_TIME fields are missing
4542 # in local and/or remote Packages file.
4544 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
4545 except (KeyError, ValueError):
4549 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
4550 except (KeyError, ValueError):
4551 installed_timestamp = 0
4553 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4555 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4556 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4557 if built_timestamp and \
4558 built_timestamp > installed_timestamp and \
4559 built_timestamp >= minimal_timestamp:
4560 return built_pkg, existing_node
4562 #Don't care if the binary has an older BUILD_TIME than the installed
4563 #package. This is for closely tracking a binhost.
4564 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4566 if built_timestamp and \
4567 built_timestamp != installed_timestamp:
4568 return built_pkg, existing_node
# Drop installed packages with invalid metadata when alternatives
# remain, then prefer a visible installed package outright.
4570 for pkg in matched_packages:
4571 if pkg.installed and pkg.invalid:
4572 matched_packages = [x for x in \
4573 matched_packages if x is not pkg]
4576 for pkg in matched_packages:
4577 if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
4578 return pkg, existing_node
# Narrow to the best version, preferring visible matches (and
# --useoldpkg matches first); fall back to ignoring visibility if
# everything is masked.
4580 visible_matches = []
4582 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4583 if self._pkg_visibility_check(pkg, autounmask_level)]
4584 if not visible_matches:
4585 visible_matches = [pkg.cpv for pkg in matched_packages \
4586 if self._pkg_visibility_check(pkg, autounmask_level)]
4588 bestmatch = portage.best(visible_matches)
4590 # all are masked, so ignore visibility
4591 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4592 matched_packages = [pkg for pkg in matched_packages \
4593 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4595 # ordered by type preference ("ebuild" type is the last resort)
4596 return matched_packages[-1], existing_node
4598 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4600 Select packages that have already been added to the graph or
4601 those that are installed and have not been scheduled for
# Used in complete-graph mode (see _complete_graph), where selection is
# restricted to the already-built dependency graph.
4604 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4605 matches = graph_db.match_pkgs(atom)
4608 pkg = matches[-1] # highest match
# Also report whether this slot already has a node in the graph.
4609 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4610 return pkg, in_graph
4612 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4614 Select packages that are installed.
# Used in complete-graph "remove" mode; only the installed db is
# consulted. Returns (pkg_or_None, existing_graph_node).
4616 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
4620 if len(matches) > 1:
4621 matches.reverse() # ascending order
# Prefer an unmasked installed match when it is unambiguous.
4622 unmasked = [pkg for pkg in matches if \
4623 self._pkg_visibility_check(pkg)]
4625 if len(unmasked) == 1:
4628 # Account for packages with masks (like KEYWORDS masks)
4629 # that are usually ignored in visibility checks for
4630 # installed packages, in order to handle cases like
4632 unmasked = [pkg for pkg in matches if not pkg.masks]
4635 pkg = matches[-1] # highest match
4636 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4637 return pkg, in_graph
4639 def _complete_graph(self, required_sets=None):
4641 Add any deep dependencies of required sets (args, system, world) that
4642 have not been pulled into the graph yet. This ensures that the graph
4643 is consistent such that initially satisfied deep dependencies are not
4644 broken in the new graph. Initially unsatisfied dependencies are
4645 irrelevant since we only want to avoid breaking dependencies that are
4646 initially satisfied.
4648 Since this method can consume enough time to disturb users, it is
4649 currently only enabled by the --complete-graph option.
4651 @param required_sets: contains required sets (currently only used
4652 for depclean and prune removal operations)
4653 @type required_sets: dict
# Nothing to do for --buildpkgonly or non-recursive resolution.
4655 if "--buildpkgonly" in self._frozen_config.myopts or \
4656 "recurse" not in self._dynamic_config.myparams:
4659 complete_if_new_use = self._dynamic_config.myparams.get(
4660 "complete_if_new_use", "y") == "y"
4661 complete_if_new_ver = self._dynamic_config.myparams.get(
4662 "complete_if_new_ver", "y") == "y"
4663 rebuild_if_new_slot = self._dynamic_config.myparams.get(
4664 "rebuild_if_new_slot", "y") == "y"
4665 complete_if_new_slot = rebuild_if_new_slot
4667 if "complete" not in self._dynamic_config.myparams and \
4668 (complete_if_new_use or
4669 complete_if_new_ver or complete_if_new_slot):
4670 # Enable complete mode if an installed package will change somehow.
# Scan merge nodes for a version, USE, or slot difference relative to
# the installed instance; any such difference triggers complete mode.
4672 version_change = False
4673 for node in self._dynamic_config.digraph:
4674 if not isinstance(node, Package) or \
4675 node.operation != "merge":
4677 vardb = self._frozen_config.roots[
4678 node.root].trees["vartree"].dbapi
4680 if complete_if_new_use or complete_if_new_ver:
4681 inst_pkg = vardb.match_pkgs(node.slot_atom)
4682 if inst_pkg and inst_pkg[0].cp == node.cp:
4683 inst_pkg = inst_pkg[0]
4684 if complete_if_new_ver and \
4685 (inst_pkg < node or node < inst_pkg):
4686 version_change = True
4689 # Intersect enabled USE with IUSE, in order to
4690 # ignore forced USE from implicit IUSE flags, since
4691 # they're probably irrelevant and they are sensitive
4692 # to use.mask/force changes in the profile.
4693 if complete_if_new_use and \
4694 (node.iuse.all != inst_pkg.iuse.all or
4695 self._pkg_use_enabled(node).intersection(node.iuse.all) !=
4696 self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
4700 if complete_if_new_slot:
4701 cp_list = vardb.match_pkgs(Atom(node.cp))
4702 if (cp_list and cp_list[0].cp == node.cp and
4703 not any(node.slot == pkg.slot for pkg in cp_list)):
4704 version_change = True
4707 if use_change or version_change:
4708 self._dynamic_config.myparams["complete"] = True
4710 if "complete" not in self._dynamic_config.myparams:
4715 # Put the depgraph into a mode that causes it to only
4716 # select packages that have already been added to the
4717 # graph or those that are installed and have not been
4718 # scheduled for replacement. Also, toggle the "deep"
4719 # parameter so that all dependencies are traversed and
4721 self._dynamic_config._complete_mode = True
4722 self._select_atoms = self._select_atoms_from_graph
4723 if "remove" in self._dynamic_config.myparams:
4724 self._select_package = self._select_pkg_from_installed
4726 self._select_package = self._select_pkg_from_graph
4727 self._dynamic_config._traverse_ignored_deps = True
4728 already_deep = self._dynamic_config.myparams.get("deep") is True
4729 if not already_deep:
4730 self._dynamic_config.myparams["deep"] = True
4732 # Invalidate the package selection cache, since
4733 # _select_package has just changed implementations.
4734 for trees in self._dynamic_config._filtered_trees.values():
4735 trees["porttree"].dbapi._clear_cache()
# Re-queue the required sets (args/system/world, or the caller-supplied
# replacements) so their deep deps get traversed under the new mode.
4737 args = self._dynamic_config._initial_arg_list[:]
4738 for root in self._frozen_config.roots:
4739 if root != self._frozen_config.target_root and \
4740 ("remove" in self._dynamic_config.myparams or
4741 self._frozen_config.myopts.get("--root-deps") is not None):
4742 # Only pull in deps for the relevant root.
4744 depgraph_sets = self._dynamic_config.sets[root]
4745 required_set_names = self._frozen_config._required_set_names.copy()
4746 remaining_args = required_set_names.copy()
4747 if required_sets is None or root not in required_sets:
4750 # Removal actions may override sets with temporary
4751 # replacements that have had atoms removed in order
4752 # to implement --deselect behavior.
4753 required_set_names = set(required_sets[root])
4754 depgraph_sets.sets.clear()
4755 depgraph_sets.sets.update(required_sets[root])
4756 if "remove" not in self._dynamic_config.myparams and \
4757 root == self._frozen_config.target_root and \
4759 remaining_args.difference_update(depgraph_sets.sets)
4760 if not remaining_args and \
4761 not self._dynamic_config._ignored_deps and \
4762 not self._dynamic_config._dep_stack:
4764 root_config = self._frozen_config.roots[root]
4765 for s in required_set_names:
4766 pset = depgraph_sets.sets.get(s)
4768 pset = root_config.sets[s]
4769 atom = SETPREFIX + s
4770 args.append(SetArg(arg=atom, pset=pset,
4771 root_config=root_config))
4773 self._set_args(args)
4774 for arg in self._expand_set_args(args, add_to_digraph=True):
4775 for atom in arg.pset.getAtoms():
4776 self._dynamic_config._dep_stack.append(
4777 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps must now be traversed too.
4781 if self._dynamic_config._ignored_deps:
4782 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4783 self._dynamic_config._ignored_deps = []
4784 if not self._create_graph(allow_unsatisfied=True):
4786 # Check the unsatisfied deps to see if any initially satisfied deps
4787 # will become unsatisfied due to an upgrade. Initially unsatisfied
4788 # deps are irrelevant since we only want to avoid breaking deps
4789 # that are initially satisfied.
4790 while self._dynamic_config._unsatisfied_deps:
4791 dep = self._dynamic_config._unsatisfied_deps.pop()
4792 vardb = self._frozen_config.roots[
4793 dep.root].trees["vartree"].dbapi
4794 matches = vardb.match_pkgs(dep.atom)
4796 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4798 # An scheduled installation broke a deep dependency.
4799 # Add the installed package to the graph so that it
4800 # will be appropriately reported as a slot collision
4801 # (possibly solvable via backtracking).
4802 pkg = matches[-1] # highest match
4803 if not self._add_pkg(pkg, dep):
4805 if not self._create_graph(allow_unsatisfied=True):
4809 def _pkg(self, cpv, type_name, root_config, installed=False,
4810 onlydeps=False, myrepo = None):
4812 Get a package instance from the cache, or create a new
4813 one if necessary. Raises PackageNotFound from aux_get if it
4814 failures for some reason (package does not exist or is
4818 # Ensure that we use the specially optimized RootConfig instance
4819 # that refers to FakeVartree instead of the real vartree.
4820 root_config = self._frozen_config.roots[root_config.root]
# Look up by the canonical Package hash key first.
4821 pkg = self._frozen_config._pkg_cache.get(
4822 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4823 repo_name=myrepo, root_config=root_config,
4824 installed=installed, onlydeps=onlydeps))
4825 if pkg is None and onlydeps and not installed:
4826 # Maybe it already got pulled in as a "merge" node.
4827 pkg = self._dynamic_config.mydbapi[root_config.root].get(
4828 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4829 repo_name=myrepo, root_config=root_config,
4830 installed=installed, onlydeps=False))
# Cache miss: fetch metadata from the appropriate tree's dbapi and
# construct a new Package. (The try: around aux_get is missing from
# this gapped extraction.)
4833 tree_type = self.pkg_tree_map[type_name]
4834 db = root_config.trees[tree_type].dbapi
4835 db_keys = list(self._frozen_config._trees_orig[root_config.root][
4836 tree_type].dbapi._aux_cache_keys)
4839 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
4841 raise portage.exception.PackageNotFound(cpv)
4843 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
4844 installed=installed, metadata=metadata, onlydeps=onlydeps,
4845 root_config=root_config, type_name=type_name)
4847 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest license-only-masked package per (root, slot) so
# the unsatisfied-dep display can suggest ACCEPT_LICENSE changes.
4849 if not self._pkg_visibility_check(pkg) and \
4850 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
4851 slot_key = (pkg.root, pkg.slot_atom)
4852 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
4853 if other_pkg is None or pkg > other_pkg:
4854 self._frozen_config._highest_license_masked[slot_key] = pkg
4858 def _validate_blockers(self):
4859 """Remove any blockers from the digraph that do not match any of the
4860 packages within the graph. If necessary, create hard deps to ensure
4861 correct merge order such that mutually blocking packages are never
4862 installed simultaneously. Also add runtime blockers from all installed
4863 packages if any of them haven't been added already (bug 128809)."""
# Fast exit: with --buildpkgonly or --nodeps there is nothing to merge in
# order, so blocker validation is skipped entirely.
4865 if "--buildpkgonly" in self._frozen_config.myopts or \
4866 "--nodeps" in self._frozen_config.myopts:
4870 # Pull in blockers from all installed packages that haven't already
4871 # been pulled into the depgraph, in order to ensure that they are
4872 # respected (bug 128809). Due to the performance penalty that is
4873 # incurred by all the additional dep_check calls that are required,
4874 # blockers returned from dep_check are cached on disk by the
4875 # BlockerCache class.
4877 # For installed packages, always ignore blockers from DEPEND since
4878 # only runtime dependencies should be relevant for packages that
4879 # are already built.
4880 dep_keys = ["RDEPEND", "PDEPEND"]
4881 for myroot in self._frozen_config.trees:
# --root-deps only applies deps to the target root; other roots are skipped.
4883 if self._frozen_config.myopts.get("--root-deps") is not None and \
4884 myroot != self._frozen_config.target_root:
4887 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4888 pkgsettings = self._frozen_config.pkgsettings[myroot]
4889 root_config = self._frozen_config.roots[myroot]
4890 final_db = self._dynamic_config.mydbapi[myroot]
# stale_cache starts as every cached cpv; entries still present after the
# loop correspond to packages no longer installed and are purged below.
4892 blocker_cache = BlockerCache(myroot, vardb)
4893 stale_cache = set(blocker_cache)
4896 stale_cache.discard(cpv)
4897 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4899 pkg in self._dynamic_config._traversed_pkg_deps
4901 # Check for masked installed packages. Only warn about
4902 # packages that are in the graph in order to avoid warning
4903 # about those that will be automatically uninstalled during
4904 # the merge process or by --depclean. Always warn about
4905 # packages masked by license, since the user likely wants
4906 # to adjust ACCEPT_LICENSE.
4908 if not self._pkg_visibility_check(pkg,
4909 trust_graph=False) and \
4910 (pkg_in_graph or 'LICENSE' in pkg.masks):
4911 self._dynamic_config._masked_installed.add(pkg)
4913 self._check_masks(pkg)
4915 blocker_atoms = None
4921 self._dynamic_config._blocker_parents.child_nodes(pkg))
4926 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4930 # Select just the runtime blockers.
4931 blockers = [blocker for blocker in blockers \
4932 if blocker.priority.runtime or \
4933 blocker.priority.runtime_post]
4934 if blockers is not None:
4935 blockers = set(blocker.atom for blocker in blockers)
4937 # If this node has any blockers, create a "nomerge"
4938 # node for it so that they can be enforced.
4939 self._spinner_update()
4940 blocker_data = blocker_cache.get(cpv)
# COUNTER mismatch means the cached entry belongs to a different build of
# this cpv, so it must not be trusted. (long() — file targets Python 2.)
4941 if blocker_data is not None and \
4942 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4945 # If blocker data from the graph is available, use
4946 # it to validate the cache and update the cache if
4948 if blocker_data is not None and \
4949 blockers is not None:
4950 if not blockers.symmetric_difference(
4951 blocker_data.atoms):
4955 if blocker_data is None and \
4956 blockers is not None:
4957 # Re-use the blockers from the graph.
4958 blocker_atoms = sorted(blockers)
4959 counter = long(pkg.metadata["COUNTER"])
4961 blocker_cache.BlockerData(counter, blocker_atoms)
4962 blocker_cache[pkg.cpv] = blocker_data
4966 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
4968 # Use aux_get() to trigger FakeVartree global
4969 # updates on *DEPEND when appropriate.
4970 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4971 # It is crucial to pass in final_db here in order to
4972 # optimize dep_check calls by eliminating atoms via
4973 # dep_wordreduce and dep_eval calls.
4975 success, atoms = portage.dep_check(depstr,
4976 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4977 trees=self._dynamic_config._graph_trees, myroot=myroot)
4980 except Exception as e:
4981 # This is helpful, for example, if a ValueError
4982 # is thrown from cpv_expand due to multiple
4983 # matches (this can happen if an atom lacks a
4985 show_invalid_depstring_notice(
4986 pkg, depstr, _unicode_decode("%s") % (e,))
4990 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4991 if replacement_pkg and \
4992 replacement_pkg[0].operation == "merge":
4993 # This package is being replaced anyway, so
4994 # ignore invalid dependencies so as not to
4995 # annoy the user too much (otherwise they'd be
4996 # forced to manually unmerge it first).
4998 show_invalid_depstring_notice(pkg, depstr, atoms)
5000 blocker_atoms = [myatom for myatom in atoms \
5002 blocker_atoms.sort()
5003 counter = long(pkg.metadata["COUNTER"])
5004 blocker_cache[cpv] = \
5005 blocker_cache.BlockerData(counter, blocker_atoms)
5008 for atom in blocker_atoms:
5009 blocker = Blocker(atom=atom,
5010 eapi=pkg.metadata["EAPI"],
5011 priority=self._priority(runtime=True),
5013 self._dynamic_config._blocker_parents.add(blocker, pkg)
5014 except portage.exception.InvalidAtom as e:
5015 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5016 show_invalid_depstring_notice(
5018 _unicode_decode("Invalid Atom: %s") % (e,))
# Purge cache entries for packages that are no longer installed, then
# persist the cache to disk for the next depgraph run.
5020 for cpv in stale_cache:
5021 del blocker_cache[cpv]
5022 blocker_cache.flush()
5025 # Discard any "uninstall" tasks scheduled by previous calls
5026 # to this method, since those tasks may not make sense given
5027 # the current graph state.
5028 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
5029 if previous_uninstall_tasks:
5030 self._dynamic_config._blocker_uninstalls = digraph()
5031 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each blocker against both the initial (installed)
# and final (post-merge) package databases.
5033 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
5034 self._spinner_update()
5035 root_config = self._frozen_config.roots[blocker.root]
5036 virtuals = root_config.settings.getvirtuals()
5037 myroot = blocker.root
5038 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
5039 final_db = self._dynamic_config.mydbapi[myroot]
5041 provider_virtual = False
5042 if blocker.cp in virtuals and \
5043 not self._have_new_virt(blocker.root, blocker.cp):
5044 provider_virtual = True
5046 # Use this to check PROVIDE for each matched package
5048 atom_set = InternalPackageSet(
5049 initial_atoms=[blocker.atom])
# For old-style virtuals, expand the blocker atom to each provider's cp.
5051 if provider_virtual:
5053 for provider_entry in virtuals[blocker.cp]:
5054 atoms.append(Atom(blocker.atom.replace(
5055 blocker.cp, provider_entry.cp, 1)))
5057 atoms = [blocker.atom]
5059 blocked_initial = set()
5061 for pkg in initial_db.match_pkgs(atom):
5062 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5063 blocked_initial.add(pkg)
5065 blocked_final = set()
5067 for pkg in final_db.match_pkgs(atom):
5068 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5069 blocked_final.add(pkg)
# Nothing matches this blocker in either state: it is irrelevant.
5071 if not blocked_initial and not blocked_final:
5072 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
5073 self._dynamic_config._blocker_parents.remove(blocker)
5074 # Discard any parents that don't have any more blockers.
5075 for pkg in parent_pkgs:
5076 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
5077 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
5078 self._dynamic_config._blocker_parents.remove(pkg)
5080 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
5081 unresolved_blocks = False
5082 depends_on_order = set()
5083 for pkg in blocked_initial:
5084 if pkg.slot_atom == parent.slot_atom and \
5085 not blocker.atom.blocker.overlap.forbid:
5086 # New !!atom blockers do not allow temporary
5087 # simultaneous installation, so unlike !atom
5088 # blockers, !!atom blockers aren't ignored
5089 # when they match other packages occupying
5092 if parent.installed:
5093 # Two currently installed packages conflict with
5094 # each other. Ignore this case since the damage
5095 # is already done and this would be likely to
5096 # confuse users if displayed like a normal blocker.
5099 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5101 if parent.operation == "merge":
5102 # Maybe the blocked package can be replaced or simply
5103 # unmerged to resolve this block.
5104 depends_on_order.add((pkg, parent))
5106 # None of the above blocker resolutions techniques apply,
5107 # so apparently this one is unresolvable.
5108 unresolved_blocks = True
5109 for pkg in blocked_final:
5110 if pkg.slot_atom == parent.slot_atom and \
5111 not blocker.atom.blocker.overlap.forbid:
5112 # New !!atom blockers do not allow temporary
5113 # simultaneous installation, so unlike !atom
5114 # blockers, !!atom blockers aren't ignored
5115 # when they match other packages occupying
5118 if parent.operation == "nomerge" and \
5119 pkg.operation == "nomerge":
5120 # This blocker will be handled the next time that a
5121 # merge of either package is triggered.
5124 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5126 # Maybe the blocking package can be
5127 # unmerged to resolve this block.
5128 if parent.operation == "merge" and pkg.installed:
5129 depends_on_order.add((pkg, parent))
5131 elif parent.operation == "nomerge":
5132 depends_on_order.add((parent, pkg))
5134 # None of the above blocker resolutions techniques apply,
5135 # so apparently this one is unresolvable.
5136 unresolved_blocks = True
5138 # Make sure we don't unmerge any package that have been pulled
5140 if not unresolved_blocks and depends_on_order:
5141 for inst_pkg, inst_task in depends_on_order:
5142 if self._dynamic_config.digraph.contains(inst_pkg) and \
5143 self._dynamic_config.digraph.parent_nodes(inst_pkg):
5144 unresolved_blocks = True
5147 if not unresolved_blocks and depends_on_order:
5148 for inst_pkg, inst_task in depends_on_order:
# Build a synthetic "uninstall" task for the installed package so that
# merge order can be enforced via a hard dependency edge.
5149 uninst_task = Package(built=inst_pkg.built,
5150 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5151 metadata=inst_pkg.metadata,
5152 operation="uninstall",
5153 root_config=inst_pkg.root_config,
5154 type_name=inst_pkg.type_name)
5155 # Enforce correct merge order with a hard dep.
5156 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
5157 priority=BlockerDepPriority.instance)
5158 # Count references to this blocker so that it can be
5159 # invalidated after nodes referencing it have been
5161 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
5162 if not unresolved_blocks and not depends_on_order:
5163 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
5164 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
5165 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
5166 self._dynamic_config._blocker_parents.remove(blocker)
5167 if not self._dynamic_config._blocker_parents.child_nodes(parent):
5168 self._dynamic_config._blocker_parents.remove(parent)
5169 if unresolved_blocks:
5170 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
5174 def _accept_blocker_conflicts(self):
# Return whether blocker conflicts may be tolerated for this run.
# NOTE(review): presumably returns True when any of the listed
# no-merge/fetch-only options is active — confirm against full source.
5176 for x in ("--buildpkgonly", "--fetchonly",
5177 "--fetch-all-uri", "--nodeps"):
5178 if x in self._frozen_config.myopts:
5183 def _merge_order_bias(self, mygraph):
5185 For optimal leaf node selection, promote deep system runtime deps and
5186 order nodes from highest to lowest overall reference count.
# node_info maps each node to its parent (reverse-dependency) count, used
# as the final tie-breaker in the comparator below.
5190 for node in mygraph.order:
5191 node_info[node] = len(mygraph.parent_nodes(node))
5192 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Classic cmp-style comparator (negative/zero/positive), adapted for
# key-based sorting via cmp_sort_key below. Ordering criteria, in
# priority order: uninstall tasks, deep system runtime deps, then
# descending reference count.
5194 def cmp_merge_preference(node1, node2):
5196 if node1.operation == 'uninstall':
5197 if node2.operation == 'uninstall':
5201 if node2.operation == 'uninstall':
5202 if node1.operation == 'uninstall':
5206 node1_sys = node1 in deep_system_deps
5207 node2_sys = node2 in deep_system_deps
5208 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
5213 return node_info[node2] - node_info[node1]
5215 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# NOTE(review): the 'reversed' parameter shadows the builtin; it cannot be
# renamed here without breaking keyword callers.
5217 def altlist(self, reversed=False):
# Serialize the dependency graph into a merge-ordered task list, retrying
# while conflict resolution invalidates the cached serialization.
5219 while self._dynamic_config._serialized_tasks_cache is None:
5220 self._resolve_conflicts()
5222 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
5223 self._serialize_tasks()
5224 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list in place.
5227 retlist = self._dynamic_config._serialized_tasks_cache[:]
5232 def _implicit_libc_deps(self, mergelist, graph):
5234 Create implicit dependencies on libc, in order to ensure that libc
5235 is installed as early as possible (see bug #303567).
# Only the running root gets implicit libc deps.
5238 implicit_libc_roots = (self._frozen_config._running_root.root,)
5239 for root in implicit_libc_roots:
5240 graphdb = self._dynamic_config.mydbapi[root]
5241 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5242 for atom in self._expand_virt_from_graph(root,
5243 portage.const.LIBC_PACKAGE_ATOM):
5246 match = graphdb.match_pkgs(atom)
# Only consider libc packages that are new installs (being merged and
# not already present in the installed-package database).
5250 if pkg.operation == "merge" and \
5251 not vardb.cpv_exists(pkg.cpv):
5252 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; once a libc package has been seen, every
# later merge on the same root gets a buildtime edge onto it so libc
# merges as early as possible.
5257 earlier_libc_pkgs = set()
5259 for pkg in mergelist:
5260 if not isinstance(pkg, Package):
5261 # a satisfied blocker
5263 root_libc_pkgs = libc_pkgs.get(pkg.root)
5264 if root_libc_pkgs is not None and \
5265 pkg.operation == "merge":
5266 if pkg in root_libc_pkgs:
5267 earlier_libc_pkgs.add(pkg)
5269 for libc_pkg in root_libc_pkgs:
5270 if libc_pkg in earlier_libc_pkgs:
5271 graph.add(libc_pkg, pkg,
5272 priority=DepPriority(buildtime=True))
5274 def schedulerGraph(self):
5276 The scheduler graph is identical to the normal one except that
5277 uninstall edges are reversed in specific cases that require
5278 conflicting packages to be temporarily installed simultaneously.
5279 This is intended for use by the Scheduler in its parallelization
5280 logic. It ensures that temporary simultaneous installation of
5281 conflicting packages is avoided when appropriate (especially for
5282 !!atom blockers), but allowed in specific cases that require it.
5284 Note that this method calls break_refs() which alters the state of
5285 internal Package instances such that this depgraph instance should
5286 not be used to perform any more calculations.
5289 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
5290 mergelist = self.altlist()
5291 self._implicit_libc_deps(mergelist,
5292 self._dynamic_config._scheduler_graph)
5294 # Break DepPriority.satisfied attributes which reference
5295 # installed Package instances.
5296 for parents, children, node in \
5297 self._dynamic_config._scheduler_graph.nodes.values():
5298 for priorities in chain(parents.values(), children.values()):
5299 for priority in priorities:
# Not a no-op: 'satisfied' holds a truthy object reference; replacing
# it with the literal True preserves truthiness while dropping the
# reference so the object can be garbage collected.
5300 if priority.satisfied:
5301 priority.satisfied = True
# Prune the package cache down to packages that are still reachable:
# those in the graph, or installed ones still present in the vartree.
5303 pkg_cache = self._frozen_config._pkg_cache
5304 graph = self._dynamic_config._scheduler_graph
5305 trees = self._frozen_config.trees
5306 pruned_pkg_cache = {}
5307 for key, pkg in pkg_cache.items():
5308 if pkg in graph or \
5309 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
5310 pruned_pkg_cache[key] = pkg
5313 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
5317 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
5321 def break_refs(self):
5323 Break any references in Package instances that lead back to the depgraph.
5324 This is useful if you want to hold references to packages without also
5325 holding the depgraph on the heap. It should only be called after the
5326 depgraph and _frozen_config will not be used for any more calculations.
# Make each RootConfig identical to the original (pre-depgraph) one, then
# point the original tree at this instance so only one copy survives.
5328 for root_config in self._frozen_config.roots.values():
5329 root_config.update(self._frozen_config._trees_orig[
5330 root_config.root]["root_config"])
5331 # Both instances are now identical, so discard the
5332 # original which should have no other references.
5333 self._frozen_config._trees_orig[
5334 root_config.root]["root_config"] = root_config
5336 def _resolve_conflicts(self):
# Force "complete" graph mode when slot collisions exist, backtracking is
# still allowed, and blocker conflicts are not acceptable — a complete
# graph is needed to resolve such conflicts safely.
5338 if "complete" not in self._dynamic_config.myparams and \
5339 self._dynamic_config._allow_backtracking and \
5340 self._dynamic_config._slot_collision_nodes and \
5341 not self._accept_blocker_conflicts():
5342 self._dynamic_config.myparams["complete"] = True
5344 if not self._complete_graph():
5345 raise self._unknown_internal_error()
5347 self._process_slot_conflicts()
5349 self._slot_operator_trigger_reinstalls()
# Blocker validation failure is unrecoverable here; disable restart and
# surface an internal error.
5351 if not self._validate_blockers():
5352 self._dynamic_config._skip_restart = True
5353 raise self._unknown_internal_error()
5355 def _serialize_tasks(self):
5357 debug = "--debug" in self._frozen_config.myopts
5360 writemsg("\ndigraph:\n\n", noiselevel=-1)
5361 self._dynamic_config.digraph.debug_print()
5362 writemsg("\n", noiselevel=-1)
5364 scheduler_graph = self._dynamic_config.digraph.copy()
5366 if '--nodeps' in self._frozen_config.myopts:
5367 # Preserve the package order given on the command line.
5368 return ([node for node in scheduler_graph \
5369 if isinstance(node, Package) \
5370 and node.operation == 'merge'], scheduler_graph)
5372 mygraph=self._dynamic_config.digraph.copy()
5374 removed_nodes = set()
5376 # Prune off all DependencyArg instances since they aren't
5377 # needed, and because of nested sets this is faster than doing
5378 # it with multiple digraph.root_nodes() calls below. This also
5379 # takes care of nested sets that have circular references,
5380 # which wouldn't be matched by digraph.root_nodes().
5381 for node in mygraph:
5382 if isinstance(node, DependencyArg):
5383 removed_nodes.add(node)
5385 mygraph.difference_update(removed_nodes)
5386 removed_nodes.clear()
5388 # Prune "nomerge" root nodes if nothing depends on them, since
5389 # otherwise they slow down merge order calculation. Don't remove
5390 # non-root nodes since they help optimize merge order in some cases
5391 # such as revdep-rebuild.
5394 for node in mygraph.root_nodes():
5395 if not isinstance(node, Package) or \
5396 node.installed or node.onlydeps:
5397 removed_nodes.add(node)
5399 self._spinner_update()
5400 mygraph.difference_update(removed_nodes)
5401 if not removed_nodes:
5403 removed_nodes.clear()
5404 self._merge_order_bias(mygraph)
5405 def cmp_circular_bias(n1, n2):
5407 RDEPEND is stronger than PDEPEND and this function
5408 measures such a strength bias within a circular
5409 dependency relationship.
5411 n1_n2_medium = n2 in mygraph.child_nodes(n1,
5412 ignore_priority=priority_range.ignore_medium_soft)
5413 n2_n1_medium = n1 in mygraph.child_nodes(n2,
5414 ignore_priority=priority_range.ignore_medium_soft)
5415 if n1_n2_medium == n2_n1_medium:
5420 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
5422 # Contains uninstall tasks that have been scheduled to
5423 # occur after overlapping blockers have been installed.
5424 scheduled_uninstalls = set()
5425 # Contains any Uninstall tasks that have been ignored
5426 # in order to avoid the circular deps code path. These
5427 # correspond to blocker conflicts that could not be
5429 ignored_uninstall_tasks = set()
5430 have_uninstall_task = False
5431 complete = "complete" in self._dynamic_config.myparams
5434 def get_nodes(**kwargs):
5436 Returns leaf nodes excluding Uninstall instances
5437 since those should be executed as late as possible.
5439 return [node for node in mygraph.leaf_nodes(**kwargs) \
5440 if isinstance(node, Package) and \
5441 (node.operation != "uninstall" or \
5442 node in scheduled_uninstalls)]
5444 # sys-apps/portage needs special treatment if ROOT="/"
5445 running_root = self._frozen_config._running_root.root
5446 runtime_deps = InternalPackageSet(
5447 initial_atoms=[PORTAGE_PACKAGE_ATOM])
5448 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
5449 PORTAGE_PACKAGE_ATOM)
5450 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
5451 PORTAGE_PACKAGE_ATOM)
5454 running_portage = running_portage[0]
5456 running_portage = None
5458 if replacement_portage:
5459 replacement_portage = replacement_portage[0]
5461 replacement_portage = None
5463 if replacement_portage == running_portage:
5464 replacement_portage = None
5466 if running_portage is not None:
5468 portage_rdepend = self._select_atoms_highest_available(
5469 running_root, running_portage.metadata["RDEPEND"],
5470 myuse=self._pkg_use_enabled(running_portage),
5471 parent=running_portage, strict=False)
5472 except portage.exception.InvalidDependString as e:
5473 portage.writemsg("!!! Invalid RDEPEND in " + \
5474 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
5475 (running_root, running_portage.cpv, e), noiselevel=-1)
5477 portage_rdepend = {running_portage : []}
5478 for atoms in portage_rdepend.values():
5479 runtime_deps.update(atom for atom in atoms \
5480 if not atom.blocker)
5482 # Merge libc asap, in order to account for implicit
5483 # dependencies. See bug #303567.
5484 implicit_libc_roots = (running_root,)
5485 for root in implicit_libc_roots:
5487 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5488 graphdb = self._dynamic_config.mydbapi[root]
5489 for atom in self._expand_virt_from_graph(root,
5490 portage.const.LIBC_PACKAGE_ATOM):
5493 match = graphdb.match_pkgs(atom)
5497 if pkg.operation == "merge" and \
5498 not vardb.cpv_exists(pkg.cpv):
5502 # If there's also an os-headers upgrade, we need to
5503 # pull that in first. See bug #328317.
5504 for atom in self._expand_virt_from_graph(root,
5505 portage.const.OS_HEADERS_PACKAGE_ATOM):
5508 match = graphdb.match_pkgs(atom)
5512 if pkg.operation == "merge" and \
5513 not vardb.cpv_exists(pkg.cpv):
5514 asap_nodes.append(pkg)
5516 asap_nodes.extend(libc_pkgs)
5518 def gather_deps(ignore_priority, mergeable_nodes,
5519 selected_nodes, node):
5521 Recursively gather a group of nodes that RDEPEND on
5522 eachother. This ensures that they are merged as a group
5523 and get their RDEPENDs satisfied as soon as possible.
5525 if node in selected_nodes:
5527 if node not in mergeable_nodes:
5529 if node == replacement_portage and \
5530 mygraph.child_nodes(node,
5531 ignore_priority=priority_range.ignore_medium_soft):
5532 # Make sure that portage always has all of it's
5533 # RDEPENDs installed first.
5535 selected_nodes.add(node)
5536 for child in mygraph.child_nodes(node,
5537 ignore_priority=ignore_priority):
5538 if not gather_deps(ignore_priority,
5539 mergeable_nodes, selected_nodes, child):
5543 def ignore_uninst_or_med(priority):
5544 if priority is BlockerDepPriority.instance:
5546 return priority_range.ignore_medium(priority)
5548 def ignore_uninst_or_med_soft(priority):
5549 if priority is BlockerDepPriority.instance:
5551 return priority_range.ignore_medium_soft(priority)
5553 tree_mode = "--tree" in self._frozen_config.myopts
5554 # Tracks whether or not the current iteration should prefer asap_nodes
5555 # if available. This is set to False when the previous iteration
5556 # failed to select any nodes. It is reset whenever nodes are
5557 # successfully selected.
5560 # Controls whether or not the current iteration should drop edges that
5561 # are "satisfied" by installed packages, in order to solve circular
5562 # dependencies. The deep runtime dependencies of installed packages are
5563 # not checked in this case (bug #199856), so it must be avoided
5564 # whenever possible.
5565 drop_satisfied = False
5567 # State of variables for successive iterations that loosen the
5568 # criteria for node selection.
5570 # iteration prefer_asap drop_satisfied
5575 # If no nodes are selected on the last iteration, it is due to
5576 # unresolved blockers or circular dependencies.
5579 self._spinner_update()
5580 selected_nodes = None
5581 ignore_priority = None
5582 if drop_satisfied or (prefer_asap and asap_nodes):
5583 priority_range = DepPrioritySatisfiedRange
5585 priority_range = DepPriorityNormalRange
5586 if prefer_asap and asap_nodes:
5587 # ASAP nodes are merged before their soft deps. Go ahead and
5588 # select root nodes here if necessary, since it's typical for
5589 # the parent to have been removed from the graph already.
5590 asap_nodes = [node for node in asap_nodes \
5591 if mygraph.contains(node)]
5592 for i in range(priority_range.SOFT,
5593 priority_range.MEDIUM_SOFT + 1):
5594 ignore_priority = priority_range.ignore_priority[i]
5595 for node in asap_nodes:
5596 if not mygraph.child_nodes(node,
5597 ignore_priority=ignore_priority):
5598 selected_nodes = [node]
5599 asap_nodes.remove(node)
5604 if not selected_nodes and \
5605 not (prefer_asap and asap_nodes):
5606 for i in range(priority_range.NONE,
5607 priority_range.MEDIUM_SOFT + 1):
5608 ignore_priority = priority_range.ignore_priority[i]
5609 nodes = get_nodes(ignore_priority=ignore_priority)
5611 # If there is a mixture of merges and uninstalls,
5612 # do the uninstalls first.
5613 good_uninstalls = None
5615 good_uninstalls = []
5617 if node.operation == "uninstall":
5618 good_uninstalls.append(node)
5621 nodes = good_uninstalls
5625 if good_uninstalls or len(nodes) == 1 or \
5626 (ignore_priority is None and \
5627 not asap_nodes and not tree_mode):
5628 # Greedily pop all of these nodes since no
5629 # relationship has been ignored. This optimization
5630 # destroys --tree output, so it's disabled in tree
5632 selected_nodes = nodes
5634 # For optimal merge order:
5635 # * Only pop one node.
5636 # * Removing a root node (node without a parent)
5637 # will not produce a leaf node, so avoid it.
5638 # * It's normal for a selected uninstall to be a
5639 # root node, so don't check them for parents.
5641 prefer_asap_parents = (True, False)
5643 prefer_asap_parents = (False,)
5644 for check_asap_parent in prefer_asap_parents:
5645 if check_asap_parent:
5647 parents = mygraph.parent_nodes(node,
5648 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5649 if any(x in asap_nodes for x in parents):
5650 selected_nodes = [node]
5654 if mygraph.parent_nodes(node):
5655 selected_nodes = [node]
5662 if not selected_nodes:
5663 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
5665 mergeable_nodes = set(nodes)
5666 if prefer_asap and asap_nodes:
5668 # When gathering the nodes belonging to a runtime cycle,
5669 # we want to minimize the number of nodes gathered, since
5670 # this tends to produce a more optimal merge order.
5671 # Ignoring all medium_soft deps serves this purpose.
5672 # In the case of multiple runtime cycles, where some cycles
5673 # may depend on smaller independent cycles, it's optimal
5674 # to merge smaller independent cycles before other cycles
5675 # that depend on them. Therefore, we search for the
5676 # smallest cycle in order to try and identify and prefer
5677 # these smaller independent cycles.
5678 ignore_priority = priority_range.ignore_medium_soft
5679 smallest_cycle = None
5681 if not mygraph.parent_nodes(node):
5683 selected_nodes = set()
5684 if gather_deps(ignore_priority,
5685 mergeable_nodes, selected_nodes, node):
5686 # When selecting asap_nodes, we need to ensure
5687 # that we haven't selected a large runtime cycle
5688 # that is obviously sub-optimal. This will be
5689 # obvious if any of the non-asap selected_nodes
5690 # is a leaf node when medium_soft deps are
5692 if prefer_asap and asap_nodes and \
5693 len(selected_nodes) > 1:
5694 for node in selected_nodes.difference(
5696 if not mygraph.child_nodes(node,
5698 DepPriorityNormalRange.ignore_medium_soft):
5699 selected_nodes = None
5702 if smallest_cycle is None or \
5703 len(selected_nodes) < len(smallest_cycle):
5704 smallest_cycle = selected_nodes
5706 selected_nodes = smallest_cycle
5708 if selected_nodes and debug:
5709 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
5710 (len(selected_nodes),), noiselevel=-1)
5711 cycle_digraph = mygraph.copy()
5712 cycle_digraph.difference_update([x for x in
5713 cycle_digraph if x not in selected_nodes])
5714 cycle_digraph.debug_print()
5715 writemsg("\n", noiselevel=-1)
5717 if prefer_asap and asap_nodes and not selected_nodes:
5718 # We failed to find any asap nodes to merge, so ignore
5719 # them for the next iteration.
5723 if selected_nodes and ignore_priority is not None:
5724 # Try to merge ignored medium_soft deps as soon as possible
5725 # if they're not satisfied by installed packages.
5726 for node in selected_nodes:
5727 children = set(mygraph.child_nodes(node))
5728 soft = children.difference(
5729 mygraph.child_nodes(node,
5730 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
5731 medium_soft = children.difference(
5732 mygraph.child_nodes(node,
5734 DepPrioritySatisfiedRange.ignore_medium_soft))
5735 medium_soft.difference_update(soft)
5736 for child in medium_soft:
5737 if child in selected_nodes:
5739 if child in asap_nodes:
5741 # Merge PDEPEND asap for bug #180045.
5742 asap_nodes.append(child)
5744 if selected_nodes and len(selected_nodes) > 1:
5745 if not isinstance(selected_nodes, list):
5746 selected_nodes = list(selected_nodes)
5747 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
5749 if not selected_nodes and myblocker_uninstalls:
5750 # An Uninstall task needs to be executed in order to
5751 # avoid conflict if possible.
5754 priority_range = DepPrioritySatisfiedRange
5756 priority_range = DepPriorityNormalRange
5758 mergeable_nodes = get_nodes(
5759 ignore_priority=ignore_uninst_or_med)
5761 min_parent_deps = None
5764 for task in myblocker_uninstalls.leaf_nodes():
5765 # Do some sanity checks so that system or world packages
5766 # don't get uninstalled inappropriately here (only really
5767 # necessary when --complete-graph has not been enabled).
5769 if task in ignored_uninstall_tasks:
5772 if task in scheduled_uninstalls:
5773 # It's been scheduled but it hasn't
5774 # been executed yet due to dependence
5775 # on installation of blocking packages.
5778 root_config = self._frozen_config.roots[task.root]
5779 inst_pkg = self._pkg(task.cpv, "installed", root_config,
5782 if self._dynamic_config.digraph.contains(inst_pkg):
5785 forbid_overlap = False
5786 heuristic_overlap = False
5787 for blocker in myblocker_uninstalls.parent_nodes(task):
5788 if not eapi_has_strong_blocks(blocker.eapi):
5789 heuristic_overlap = True
5790 elif blocker.atom.blocker.overlap.forbid:
5791 forbid_overlap = True
5793 if forbid_overlap and running_root == task.root:
5796 if heuristic_overlap and running_root == task.root:
5797 # Never uninstall sys-apps/portage or it's essential
5798 # dependencies, except through replacement.
5800 runtime_dep_atoms = \
5801 list(runtime_deps.iterAtomsForPackage(task))
5802 except portage.exception.InvalidDependString as e:
5803 portage.writemsg("!!! Invalid PROVIDE in " + \
5804 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5805 (task.root, task.cpv, e), noiselevel=-1)
5809 # Don't uninstall a runtime dep if it appears
5810 # to be the only suitable one installed.
5812 vardb = root_config.trees["vartree"].dbapi
5813 for atom in runtime_dep_atoms:
5814 other_version = None
5815 for pkg in vardb.match_pkgs(atom):
5816 if pkg.cpv == task.cpv and \
5817 pkg.metadata["COUNTER"] == \
5818 task.metadata["COUNTER"]:
5822 if other_version is None:
5828 # For packages in the system set, don't take
5829 # any chances. If the conflict can't be resolved
5830 # by a normal replacement operation then abort.
5833 for atom in root_config.sets[
5834 "system"].iterAtomsForPackage(task):
5837 except portage.exception.InvalidDependString as e:
5838 portage.writemsg("!!! Invalid PROVIDE in " + \
5839 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5840 (task.root, task.cpv, e), noiselevel=-1)
5846 # Note that the world check isn't always
5847 # necessary since self._complete_graph() will
5848 # add all packages from the system and world sets to the
5849 # graph. This just allows unresolved conflicts to be
5850 # detected as early as possible, which makes it possible
5851 # to avoid calling self._complete_graph() when it is
5852 # unnecessary due to blockers triggering an abortion.
5854 # For packages in the world set, go ahead an uninstall
5855 # when necessary, as long as the atom will be satisfied
5856 # in the final state.
5857 graph_db = self._dynamic_config.mydbapi[task.root]
5860 for atom in root_config.sets[
5861 "selected"].iterAtomsForPackage(task):
5863 for pkg in graph_db.match_pkgs(atom):
5870 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
5872 except portage.exception.InvalidDependString as e:
5873 portage.writemsg("!!! Invalid PROVIDE in " + \
5874 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5875 (task.root, task.cpv, e), noiselevel=-1)
5881 # Check the deps of parent nodes to ensure that
5882 # the chosen task produces a leaf node. Maybe
5883 # this can be optimized some more to make the
5884 # best possible choice, but the current algorithm
5885 # is simple and should be near optimal for most
5887 self._spinner_update()
5888 mergeable_parent = False
5890 parent_deps.add(task)
5891 for parent in mygraph.parent_nodes(task):
5892 parent_deps.update(mygraph.child_nodes(parent,
5893 ignore_priority=priority_range.ignore_medium_soft))
5894 if min_parent_deps is not None and \
5895 len(parent_deps) >= min_parent_deps:
5896 # This task is no better than a previously selected
5897 # task, so abort search now in order to avoid wasting
5898 # any more cpu time on this task. This increases
5899 # performance dramatically in cases when there are
5900 # hundreds of blockers to solve, like when
5901 # upgrading to a new slot of kde-meta.
5902 mergeable_parent = None
5904 if parent in mergeable_nodes and \
5905 gather_deps(ignore_uninst_or_med_soft,
5906 mergeable_nodes, set(), parent):
5907 mergeable_parent = True
5909 if not mergeable_parent:
5912 if min_parent_deps is None or \
5913 len(parent_deps) < min_parent_deps:
5914 min_parent_deps = len(parent_deps)
5917 if uninst_task is not None and min_parent_deps == 1:
5918 # This is the best possible result, so so abort search
5919 # now in order to avoid wasting any more cpu time.
5922 if uninst_task is not None:
5923 # The uninstall is performed only after blocking
5924 # packages have been merged on top of it. File
5925 # collisions between blocking packages are detected
5926 # and removed from the list of files to be uninstalled.
5927 scheduled_uninstalls.add(uninst_task)
5928 parent_nodes = mygraph.parent_nodes(uninst_task)
5930 # Reverse the parent -> uninstall edges since we want
5931 # to do the uninstall after blocking packages have
5932 # been merged on top of it.
5933 mygraph.remove(uninst_task)
5934 for blocked_pkg in parent_nodes:
5935 mygraph.add(blocked_pkg, uninst_task,
5936 priority=BlockerDepPriority.instance)
5937 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5938 scheduler_graph.add(blocked_pkg, uninst_task,
5939 priority=BlockerDepPriority.instance)
5941 # Sometimes a merge node will render an uninstall
5942 # node unnecessary (due to occupying the same SLOT),
5943 # and we want to avoid executing a separate uninstall
5944 # task in that case.
5945 slot_node = self._dynamic_config.mydbapi[uninst_task.root
5946 ].match_pkgs(uninst_task.slot_atom)
5948 slot_node[0].operation == "merge":
5949 mygraph.add(slot_node[0], uninst_task,
5950 priority=BlockerDepPriority.instance)
5952 # Reset the state variables for leaf node selection and
5953 # continue trying to select leaf nodes.
5955 drop_satisfied = False
5958 if not selected_nodes:
5959 # Only select root nodes as a last resort. This case should
5960 # only trigger when the graph is nearly empty and the only
5961 # remaining nodes are isolated (no parents or children). Since
5962 # the nodes must be isolated, ignore_priority is not needed.
5963 selected_nodes = get_nodes()
5965 if not selected_nodes and not drop_satisfied:
5966 drop_satisfied = True
5969 if not selected_nodes and myblocker_uninstalls:
5970 # If possible, drop an uninstall task here in order to avoid
5971 # the circular deps code path. The corresponding blocker will
5972 # still be counted as an unresolved conflict.
5974 for node in myblocker_uninstalls.leaf_nodes():
5976 mygraph.remove(node)
5981 ignored_uninstall_tasks.add(node)
5984 if uninst_task is not None:
5985 # Reset the state variables for leaf node selection and
5986 # continue trying to select leaf nodes.
5988 drop_satisfied = False
5991 if not selected_nodes:
5992 self._dynamic_config._circular_deps_for_display = mygraph
5993 self._dynamic_config._skip_restart = True
5994 raise self._unknown_internal_error()
5996 # At this point, we've succeeded in selecting one or more nodes, so
5997 # reset state variables for leaf node selection.
5999 drop_satisfied = False
6001 mygraph.difference_update(selected_nodes)
6003 for node in selected_nodes:
6004 if isinstance(node, Package) and \
6005 node.operation == "nomerge":
6008 # Handle interactions between blockers
6009 # and uninstallation tasks.
6010 solved_blockers = set()
6012 if isinstance(node, Package) and \
6013 "uninstall" == node.operation:
6014 have_uninstall_task = True
6017 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
6018 inst_pkg = vardb.match_pkgs(node.slot_atom)
6020 # The package will be replaced by this one, so remove
6021 # the corresponding Uninstall task if necessary.
6022 inst_pkg = inst_pkg[0]
6023 uninst_task = Package(built=inst_pkg.built,
6024 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6025 metadata=inst_pkg.metadata,
6026 operation="uninstall",
6027 root_config=inst_pkg.root_config,
6028 type_name=inst_pkg.type_name)
6030 mygraph.remove(uninst_task)
6034 if uninst_task is not None and \
6035 uninst_task not in ignored_uninstall_tasks and \
6036 myblocker_uninstalls.contains(uninst_task):
6037 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6038 myblocker_uninstalls.remove(uninst_task)
6039 # Discard any blockers that this Uninstall solves.
6040 for blocker in blocker_nodes:
6041 if not myblocker_uninstalls.child_nodes(blocker):
6042 myblocker_uninstalls.remove(blocker)
6044 self._dynamic_config._unsolvable_blockers:
6045 solved_blockers.add(blocker)
6047 retlist.append(node)
6049 if (isinstance(node, Package) and \
6050 "uninstall" == node.operation) or \
6051 (uninst_task is not None and \
6052 uninst_task in scheduled_uninstalls):
6053 # Include satisfied blockers in the merge list
6054 # since the user might be interested and also
6055 # it serves as an indicator that blocking packages
6056 # will be temporarily installed simultaneously.
6057 for blocker in solved_blockers:
6058 retlist.append(blocker)
6060 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
6061 for node in myblocker_uninstalls.root_nodes():
6062 unsolvable_blockers.add(node)
6064 # If any Uninstall tasks need to be executed in order
6065 # to avoid a conflict, complete the graph with any
6066 # dependencies that may have been initially
6067 # neglected (to ensure that unsafe Uninstall tasks
6068 # are properly identified and blocked from execution).
6069 if have_uninstall_task and \
6071 not unsolvable_blockers:
6072 self._dynamic_config.myparams["complete"] = True
6073 if '--debug' in self._frozen_config.myopts:
6075 msg.append("enabling 'complete' depgraph mode " + \
6076 "due to uninstall task(s):")
6078 for node in retlist:
6079 if isinstance(node, Package) and \
6080 node.operation == 'uninstall':
6081 msg.append("\t%s" % (node,))
6082 writemsg_level("\n%s\n" % \
6083 "".join("%s\n" % line for line in msg),
6084 level=logging.DEBUG, noiselevel=-1)
6085 raise self._serialize_tasks_retry("")
6087 # Set satisfied state on blockers, but not before the
6088 # above retry path, since we don't want to modify the
6089 # state in that case.
6090 for node in retlist:
6091 if isinstance(node, Blocker):
6092 node.satisfied = True
6094 for blocker in unsolvable_blockers:
6095 retlist.append(blocker)
6097 if unsolvable_blockers and \
6098 not self._accept_blocker_conflicts():
6099 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
6100 self._dynamic_config._serialized_tasks_cache = retlist[:]
6101 self._dynamic_config._scheduler_graph = scheduler_graph
6102 self._dynamic_config._skip_restart = True
6103 raise self._unknown_internal_error()
6105 if self._dynamic_config._slot_collision_info and \
6106 not self._accept_blocker_conflicts():
6107 self._dynamic_config._serialized_tasks_cache = retlist[:]
6108 self._dynamic_config._scheduler_graph = scheduler_graph
6109 raise self._unknown_internal_error()
6111 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	"""
	Report an unresolved dependency cycle to the user.

	Delegates cycle analysis to circular_dependency_handler, displays
	the (possibly shortened) merge list in forced verbose/tree mode so
	the cycle is visible, then prints either concrete change
	suggestions that would break the cycle or a generic hint about
	disabling USE flags that trigger optional dependencies.

	@param mygraph: the digraph containing the unresolved cycle
	"""
	self._dynamic_config._circular_dependency_handler = \
		circular_dependency_handler(self, mygraph)
	handler = self._dynamic_config._circular_dependency_handler

	# Force a verbose tree display so the user can see the cycle.
	self._frozen_config.myopts.pop("--quiet", None)
	self._frozen_config.myopts["--verbose"] = True
	self._frozen_config.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(handler.merge_list)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
		noiselevel=-1)
	portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is None:
		# The handler could not produce a condensed message;
		# fall back to its raw debug output.
		handler.debug_print()
		portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is not None:
		portage.writemsg(handler.circular_dep_message, noiselevel=-1)

	suggestions = handler.suggestions
	if suggestions:
		writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
		if len(suggestions) == 1:
			writemsg("by applying the following change:\n", noiselevel=-1)
		else:
			writemsg("by applying " + colorize("bold", "any of") + \
				" the following changes:\n", noiselevel=-1)
		writemsg("".join(suggestions), noiselevel=-1)
		writemsg("\nNote that this change can be reverted, once the package has" + \
			" been installed.\n", noiselevel=-1)
		if handler.large_cycle_count:
			writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
				"Several changes might be required to resolve all cycles.\n" + \
				"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
	else:
		# No automated suggestions available; give the generic hint.
		writemsg("\n\n", noiselevel=-1)
		writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Display the cached serialized task list, unless that exact list
	(in either original or reversed order) has already been shown.
	With --tree in effect the list is shown in reverse order, which
	reads better as a dependency tree.
	"""
	tasks = self._dynamic_config._serialized_tasks_cache
	if tasks is None:
		# Nothing has been serialized yet; nothing to show.
		return

	shown = self._dynamic_config._displayed_list
	if shown is not None and \
		(shown == tasks or shown == list(reversed(tasks))):
		# This exact merge list was already displayed.
		return

	display_list = tasks[:]
	if "--tree" in self._frozen_config.myopts:
		display_list.reverse()
	self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""
	Display unresolved blocker conflicts, along with the packages
	that pulled the conflicting packages into the graph. This helps
	troubleshooting cases in which blockers don't solve automatically
	and the reasons are not apparent from the normal merge list.

	@param blockers: iterable of unresolved Blocker nodes
	"""
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for line in textwrap.wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Map each conflicting package to the set of (parent, atom)
	# pairs that pulled it in.
	conflict_pkgs = {}
	for blocker in blockers:
		for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
			self._dynamic_config._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				# Fall back to the world/selected set atom that
				# blocked this installed package, if any.
				atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
				if atom is not None:
					parent_atoms = set([("@selected", atom)])
			if parent_atoms:
				conflict_pkgs[pkg] = parent_atoms

	if conflict_pkgs:
		# Reduce noise by pruning packages that are only
		# pulled in by other conflict packages.
		pruned_pkgs = set()
		for pkg, parent_atoms in conflict_pkgs.items():
			relevant_parent = False
			for parent, atom in parent_atoms:
				if parent not in conflict_pkgs:
					relevant_parent = True
					break
			if not relevant_parent:
				pruned_pkgs.add(pkg)
		for pkg in pruned_pkgs:
			del conflict_pkgs[pkg]

	if conflict_pkgs:
		msg = []
		indent = "  "
		for pkg, parent_atoms in conflict_pkgs.items():

			# Prefer packages that are not directly involved in a conflict.
			# It can be essential to see all the packages here, so don't
			# omit any. If the list is long, people can simply use a pager.
			preferred_parents = set()
			for parent_atom in parent_atoms:
				parent, atom = parent_atom
				if parent not in conflict_pkgs:
					preferred_parents.add(parent_atom)

			ordered_list = list(preferred_parents)
			if len(parent_atoms) > len(ordered_list):
				for parent_atom in parent_atoms:
					if parent_atom not in preferred_parents:
						ordered_list.append(parent_atom)

			msg.append(indent + "%s pulled in by\n" % pkg)

			for parent_atom in ordered_list:
				parent, atom = parent_atom
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
				else:
					# Display the specific atom from SetArg or
					# Package types.
					msg.append("%s required by %s" % (atom, parent))
				msg.append("\n")

			msg.append("\n")

		writemsg("".join(msg), noiselevel=-1)

	if "--quiet" not in self._frozen_config.myopts:
		show_blocker_docs_link()
6256 def display(self, mylist, favorites=[], verbosity=None):
6258 # This is used to prevent display_problems() from
6259 # redundantly displaying this exact same merge list
6260 # again via _show_merge_list().
6261 self._dynamic_config._displayed_list = mylist
6264 return display(self, mylist, favorites, verbosity)
6266 def _display_autounmask(self):
6268 Display --autounmask message and optionally write it to config files
6269 (using CONFIG_PROTECT). The message includes the comments and the changes.
6272 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
6273 autounmask_unrestricted_atoms = \
6274 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
6275 quiet = "--quiet" in self._frozen_config.myopts
6276 pretend = "--pretend" in self._frozen_config.myopts
6277 ask = "--ask" in self._frozen_config.myopts
6278 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
6280 def check_if_latest(pkg):
6282 is_latest_in_slot = True
6283 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
6284 root_config = self._frozen_config.roots[pkg.root]
6286 for db, pkg_type, built, installed, db_keys in dbs:
6287 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
6288 if other_pkg.cp != pkg.cp:
6289 # old-style PROVIDE virtual means there are no
6290 # normal matches for this pkg_type
6294 if other_pkg.slot_atom == pkg.slot_atom:
6295 is_latest_in_slot = False
6298 # iter_match_pkgs yields highest version first, so
6299 # there's no need to search this pkg_type any further
6302 if not is_latest_in_slot:
6305 return is_latest, is_latest_in_slot
6307 #Set of roots we have autounmask changes for.
6310 masked_by_missing_keywords = False
6311 unstable_keyword_msg = {}
6312 for pkg in self._dynamic_config._needed_unstable_keywords:
6313 self._show_merge_list()
6314 if pkg in self._dynamic_config.digraph:
6317 unstable_keyword_msg.setdefault(root, [])
6318 is_latest, is_latest_in_slot = check_if_latest(pkg)
6319 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6320 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6321 use=self._pkg_use_enabled(pkg))
6322 for reason in mreasons:
6323 if reason.unmask_hint and \
6324 reason.unmask_hint.key == 'unstable keyword':
6325 keyword = reason.unmask_hint.value
6327 masked_by_missing_keywords = True
6329 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
6330 if autounmask_unrestricted_atoms:
6332 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
6333 elif is_latest_in_slot:
6334 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
6336 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6338 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
6340 p_mask_change_msg = {}
6341 for pkg in self._dynamic_config._needed_p_mask_changes:
6342 self._show_merge_list()
6343 if pkg in self._dynamic_config.digraph:
6346 p_mask_change_msg.setdefault(root, [])
6347 is_latest, is_latest_in_slot = check_if_latest(pkg)
6348 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6349 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
6350 use=self._pkg_use_enabled(pkg))
6351 for reason in mreasons:
6352 if reason.unmask_hint and \
6353 reason.unmask_hint.key == 'p_mask':
6354 keyword = reason.unmask_hint.value
6356 comment, filename = portage.getmaskingreason(
6357 pkg.cpv, metadata=pkg.metadata,
6358 settings=pkgsettings,
6359 portdb=pkg.root_config.trees["porttree"].dbapi,
6360 return_location=True)
6362 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
6364 p_mask_change_msg[root].append("# %s:\n" % filename)
6366 comment = [line for line in
6367 comment.splitlines() if line]
6368 for line in comment:
6369 p_mask_change_msg[root].append("%s\n" % line)
6370 if autounmask_unrestricted_atoms:
6372 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
6373 elif is_latest_in_slot:
6374 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
6376 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6378 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
6380 use_changes_msg = {}
6381 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
6382 self._show_merge_list()
6383 if pkg in self._dynamic_config.digraph:
6386 use_changes_msg.setdefault(root, [])
6387 is_latest, is_latest_in_slot = check_if_latest(pkg)
6388 changes = needed_use_config_change[1]
6390 for flag, state in changes.items():
6392 adjustments.append(flag)
6394 adjustments.append("-" + flag)
6395 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
6397 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6398 elif is_latest_in_slot:
6399 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
6401 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
6404 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
6405 self._show_merge_list()
6406 if pkg in self._dynamic_config.digraph:
6409 license_msg.setdefault(root, [])
6410 is_latest, is_latest_in_slot = check_if_latest(pkg)
6412 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
6414 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6415 elif is_latest_in_slot:
6416 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
6418 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6420 def find_config_file(abs_user_config, file_name):
6422 Searches /etc/portage for an appropriate file to append changes to.
6423 If the file_name is a file it is returned, if it is a directory, the
6424 last file in it is returned. Order of traversal is the identical to
6425 portage.util.grablines(recursive=True).
6427 file_name - String containing a file name like "package.use"
6428 return value - String. Absolute path of file to write to. None if
6429 no suitable file exists.
6431 file_path = os.path.join(abs_user_config, file_name)
6435 except OSError as e:
6436 if e.errno == errno.ENOENT:
6437 # The file doesn't exist, so we'll
6441 # Disk or file system trouble?
6444 last_file_path = None
6453 if stat.S_ISREG(st.st_mode):
6455 elif stat.S_ISDIR(st.st_mode):
6456 if os.path.basename(p) in _ignorecvs_dirs:
6459 contents = os.listdir(p)
6463 contents.sort(reverse=True)
6464 for child in contents:
6465 if child.startswith(".") or \
6466 child.endswith("~"):
6468 stack.append(os.path.join(p, child))
6470 return last_file_path
6472 write_to_file = autounmask_write and not pretend
6473 #Make sure we have a file to write to before doing any write.
6474 file_to_write_to = {}
6478 settings = self._frozen_config.roots[root].settings
6479 abs_user_config = os.path.join(
6480 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6482 if root in unstable_keyword_msg:
6483 if not os.path.exists(os.path.join(abs_user_config,
6484 "package.keywords")):
6485 filename = "package.accept_keywords"
6487 filename = "package.keywords"
6488 file_to_write_to[(abs_user_config, "package.keywords")] = \
6489 find_config_file(abs_user_config, filename)
6491 if root in p_mask_change_msg:
6492 file_to_write_to[(abs_user_config, "package.unmask")] = \
6493 find_config_file(abs_user_config, "package.unmask")
6495 if root in use_changes_msg:
6496 file_to_write_to[(abs_user_config, "package.use")] = \
6497 find_config_file(abs_user_config, "package.use")
6499 if root in license_msg:
6500 file_to_write_to[(abs_user_config, "package.license")] = \
6501 find_config_file(abs_user_config, "package.license")
6503 for (abs_user_config, f), path in file_to_write_to.items():
6505 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
6507 write_to_file = not problems
6509 def format_msg(lines):
6511 for i, line in enumerate(lines):
6512 if line.startswith("#"):
6514 lines[i] = colorize("INFORM", line.rstrip()) + "\n"
6515 return "".join(lines)
6518 settings = self._frozen_config.roots[root].settings
6519 abs_user_config = os.path.join(
6520 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6523 writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
6525 if root in unstable_keyword_msg:
6526 writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
6527 " are necessary to proceed:\n", noiselevel=-1)
6528 writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
6530 if root in p_mask_change_msg:
6531 writemsg("\nThe following " + colorize("BAD", "mask changes") + \
6532 " are necessary to proceed:\n", noiselevel=-1)
6533 writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
6535 if root in use_changes_msg:
6536 writemsg("\nThe following " + colorize("BAD", "USE changes") + \
6537 " are necessary to proceed:\n", noiselevel=-1)
6538 writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
6540 if root in license_msg:
6541 writemsg("\nThe following " + colorize("BAD", "license changes") + \
6542 " are necessary to proceed:\n", noiselevel=-1)
6543 writemsg(format_msg(license_msg[root]), noiselevel=-1)
6548 settings = self._frozen_config.roots[root].settings
6549 protect_obj[root] = ConfigProtect(settings["EROOT"], \
6550 shlex_split(settings.get("CONFIG_PROTECT", "")),
6551 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
6553 def write_changes(root, changes, file_to_write_to):
6554 file_contents = None
6556 file_contents = io.open(
6557 _unicode_encode(file_to_write_to,
6558 encoding=_encodings['fs'], errors='strict'),
6559 mode='r', encoding=_encodings['content'],
6560 errors='replace').readlines()
6561 except IOError as e:
6562 if e.errno == errno.ENOENT:
6565 problems.append("!!! Failed to read '%s': %s\n" % \
6566 (file_to_write_to, e))
6567 if file_contents is not None:
6568 file_contents.extend(changes)
6569 if protect_obj[root].isprotected(file_to_write_to):
6570 # We want to force new_protect_filename to ensure
6571 # that the user will see all our changes via
6572 # dispatch-conf, even if file_to_write_to doesn't
6573 # exist yet, so we specify force=True.
6574 file_to_write_to = new_protect_filename(file_to_write_to,
6577 write_atomic(file_to_write_to, "".join(file_contents))
6578 except PortageException:
6579 problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
6581 if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
6584 "NOTE: The --autounmask-keep-masks option will prevent emerge",
6585 " from creating package.unmask or ** keyword changes."
6589 line = colorize("INFORM", line)
6590 writemsg(line + "\n", noiselevel=-1)
6592 if ask and write_to_file and file_to_write_to:
6593 prompt = "\nWould you like to add these " + \
6594 "changes to your config files?"
6595 if userquery(prompt, enter_invalid) == 'No':
6596 write_to_file = False
6598 if write_to_file and file_to_write_to:
6600 settings = self._frozen_config.roots[root].settings
6601 abs_user_config = os.path.join(
6602 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6603 ensure_dirs(abs_user_config)
6605 if root in unstable_keyword_msg:
6606 write_changes(root, unstable_keyword_msg[root],
6607 file_to_write_to.get((abs_user_config, "package.keywords")))
6609 if root in p_mask_change_msg:
6610 write_changes(root, p_mask_change_msg[root],
6611 file_to_write_to.get((abs_user_config, "package.unmask")))
6613 if root in use_changes_msg:
6614 write_changes(root, use_changes_msg[root],
6615 file_to_write_to.get((abs_user_config, "package.use")))
6617 if root in license_msg:
6618 write_changes(root, license_msg[root],
6619 file_to_write_to.get((abs_user_config, "package.license")))
6622 writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
6624 writemsg("".join(problems), noiselevel=-1)
6625 elif write_to_file and roots:
6626 writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
6628 elif not pretend and not autounmask_write and roots:
6629 writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
6633 def display_problems(self):
6635 Display problems with the dependency graph such as slot collisions.
6636 This is called internally by display() to show the problems _after_
6637 the merge list where it is most likely to be seen, but if display()
6638 is not going to be called then this method should be called explicitly
6639 to ensure that the user is notified of problems with the graph.
6642 if self._dynamic_config._circular_deps_for_display is not None:
6643 self._show_circular_deps(
6644 self._dynamic_config._circular_deps_for_display)
6646 # The slot conflict display has better noise reduction than
6647 # the unsatisfied blockers display, so skip unsatisfied blockers
6648 # display if there are slot conflicts (see bug #385391).
6649 if self._dynamic_config._slot_collision_info:
6650 self._show_slot_collision_notice()
6651 elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
6652 self._show_unsatisfied_blockers(
6653 self._dynamic_config._unsatisfied_blockers_for_display)
6655 self._show_missed_update()
6657 self._show_ignored_binaries()
6659 self._display_autounmask()
6661 # TODO: Add generic support for "set problem" handlers so that
6662 # the below warnings aren't special cases for world only.
6664 if self._dynamic_config._missing_args:
6665 world_problems = False
6666 if "world" in self._dynamic_config.sets[
6667 self._frozen_config.target_root].sets:
6668 # Filter out indirect members of world (from nested sets)
6669 # since only direct members of world are desired here.
6670 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
6671 for arg, atom in self._dynamic_config._missing_args:
6672 if arg.name in ("selected", "world") and atom in world_set:
6673 world_problems = True
6677 sys.stderr.write("\n!!! Problems have been " + \
6678 "detected with your world file\n")
6679 sys.stderr.write("!!! Please run " + \
6680 green("emaint --check world")+"\n\n")
6682 if self._dynamic_config._missing_args:
6683 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
6684 " Ebuilds for the following packages are either all\n")
6685 sys.stderr.write(colorize("BAD", "!!!") + \
6686 " masked or don't exist:\n")
6687 sys.stderr.write(" ".join(str(atom) for arg, atom in \
6688 self._dynamic_config._missing_args) + "\n")
6690 if self._dynamic_config._pprovided_args:
6692 for arg, atom in self._dynamic_config._pprovided_args:
6693 if isinstance(arg, SetArg):
6695 arg_atom = (atom, atom)
6698 arg_atom = (arg.arg, atom)
6699 refs = arg_refs.setdefault(arg_atom, [])
6700 if parent not in refs:
6703 msg.append(bad("\nWARNING: "))
6704 if len(self._dynamic_config._pprovided_args) > 1:
6705 msg.append("Requested packages will not be " + \
6706 "merged because they are listed in\n")
6708 msg.append("A requested package will not be " + \
6709 "merged because it is listed in\n")
6710 msg.append("package.provided:\n\n")
6711 problems_sets = set()
6712 for (arg, atom), refs in arg_refs.items():
6715 problems_sets.update(refs)
6717 ref_string = ", ".join(["'%s'" % name for name in refs])
6718 ref_string = " pulled in by " + ref_string
6719 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
6721 if "selected" in problems_sets or "world" in problems_sets:
6722 msg.append("This problem can be solved in one of the following ways:\n\n")
6723 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
6724 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
6725 msg.append(" C) Remove offending entries from package.provided.\n\n")
6726 msg.append("The best course of action depends on the reason that an offending\n")
6727 msg.append("package.provided entry exists.\n\n")
6728 sys.stderr.write("".join(msg))
6730 masked_packages = []
6731 for pkg in self._dynamic_config._masked_license_updates:
6732 root_config = pkg.root_config
6733 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6734 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
6735 masked_packages.append((root_config, pkgsettings,
6736 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
6738 writemsg("\n" + colorize("BAD", "!!!") + \
6739 " The following updates are masked by LICENSE changes:\n",
6741 show_masked_packages(masked_packages)
6743 writemsg("\n", noiselevel=-1)
6745 masked_packages = []
6746 for pkg in self._dynamic_config._masked_installed:
6747 root_config = pkg.root_config
6748 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6749 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
6750 masked_packages.append((root_config, pkgsettings,
6751 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
6753 writemsg("\n" + colorize("BAD", "!!!") + \
6754 " The following installed packages are masked:\n",
6756 show_masked_packages(masked_packages)
6758 writemsg("\n", noiselevel=-1)
6760 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
6761 self._show_unsatisfied_dep(*pargs, **kwargs)
6763 def saveNomergeFavorites(self):
6764 """Find atoms in favorites that are not in the mergelist and add them
6765 to the world file if necessary."""
6766 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
6767 "--oneshot", "--onlydeps", "--pretend"):
6768 if x in self._frozen_config.myopts:
6770 root_config = self._frozen_config.roots[self._frozen_config.target_root]
6771 world_set = root_config.sets["selected"]
6773 world_locked = False
6774 if hasattr(world_set, "lock"):
6778 if hasattr(world_set, "load"):
6779 world_set.load() # maybe it's changed on disk
6781 args_set = self._dynamic_config.sets[
6782 self._frozen_config.target_root].sets['__non_set_args__']
6783 added_favorites = set()
6784 for x in self._dynamic_config._set_nodes:
6785 if x.operation != "nomerge":
6788 if x.root != root_config.root:
6792 myfavkey = create_world_atom(x, args_set, root_config)
6794 if myfavkey in added_favorites:
6796 added_favorites.add(myfavkey)
6797 except portage.exception.InvalidDependString as e:
6798 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
6799 (x.cpv, e), noiselevel=-1)
6800 writemsg("!!! see '%s'\n\n" % os.path.join(
6801 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
6804 for arg in self._dynamic_config._initial_arg_list:
6805 if not isinstance(arg, SetArg):
6807 if arg.root_config.root != root_config.root:
6813 if k in ("selected", "world") or \
6814 not root_config.sets[k].world_candidate:
6819 all_added.append(SETPREFIX + k)
6820 all_added.extend(added_favorites)
6823 if a.startswith(SETPREFIX):
6824 filename = "world_sets"
6828 ">>> Recording %s in \"%s\" favorites file...\n" %
6829 (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
6831 world_set.update(all_added)
	def _loadResumeCommand(self, resume_data, skip_masked=True,
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.
		"""
		# Validate the overall structure of the resume data before use.
		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		favorites = resume_data.get("favorites")
		if isinstance(favorites, list):
			args = self._load_favorites(favorites)
		fakedb = self._dynamic_config.mydbapi
		serialized_tasks = []
			# Each mergelist entry is expected to be a 4-element list:
			# [pkg_type, root, cpv, action].
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action != "merge":
			root_config = self._frozen_config.roots[myroot]

			# Use the resume "favorites" list to see if a repo was specified
			depgraph_sets = self._dynamic_config.sets[root_config.root]
			for atom in depgraph_sets.atoms.getAtoms():
				if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
			atom = "=" + pkg_key
				atom = atom + _repo_separator + repo
				atom = Atom(atom, allow_repo=True)
			for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
				# Skip packages that are masked or explicitly excluded.
				if not self._pkg_visibility_check(pkg) or \
					self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
						modified_use=self._pkg_use_enabled(pkg)):
				# It does not exist or it is corrupt.
					# TODO: log these somewhere
				raise portage.exception.PackageNotFound(pkg_key)

			if "merge" == pkg.operation and \
				self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
				modified_use=self._pkg_use_enabled(pkg)):

			if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
					self._dynamic_config._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			# Inject the validated package into the fake db so that
			# subsequent selections can see it.
			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self._spinner_update()

		if self._dynamic_config._unsatisfied_deps_for_display:

		if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
			self._dynamic_config._serialized_tasks_cache = serialized_tasks
			self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
			self._select_package = self._select_pkg_from_graph
			self._dynamic_config.myparams["selective"] = True
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of it's dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep depenedencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self._dynamic_config.myparams["deep"] = True

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in self._expand_set_args(args, add_to_digraph=True):
				for atom in arg.pset.getAtoms():
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			if not self._create_graph(allow_unsatisfied=True):

			unsatisfied_deps = []
			for dep in self._dynamic_config._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
					node = dep_stack.pop()
					if not isinstance(node, Package):
					if node.operation == "merge":
						unsatisfied_install = True
					if node in traversed:
					dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._dynamic_config._serialized_tasks_cache = None
			except self._unknown_internal_error:
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		sets = root_config.sets
		depgraph_sets = self._dynamic_config.sets[root_config.root]
			# Favorites are stored as plain strings; ignore anything else.
			if not isinstance(x, basestring):
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				if s in depgraph_sets.sets:
					depgraph_sets.sets[s] = pset
				args.append(SetArg(arg=x, pset=pset,
					root_config=root_config))
					# Plain (non-set) favorites become AtomArg instances.
					x = Atom(x, allow_repo=True)
				except portage.exception.InvalidAtom:
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		self._set_args(args)
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so callers can inspect
			# the state that produced this exception.
			self.depgraph = depgraph
	class _internal_exception(portage.exception.PortageException):
		"""Base class for exceptions used internally by the depgraph."""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
	class _backtrack_mask(_internal_exception):
		"""
		This is raised by _show_unsatisfied_dep() when it's called with
		check_backtrack=True and a matching package has been masked by
		"""
	class _autounmask_breakage(_internal_exception):
		"""
		This is raised by _show_unsatisfied_dep() when it's called with
		check_autounmask_breakage=True and a matching package has been
		disqualified due to autounmask changes.
		"""
7095 def need_restart(self):
7096 return self._dynamic_config._need_restart and \
7097 not self._dynamic_config._skip_restart
7099 def success_without_autounmask(self):
7100 return self._dynamic_config._success_without_autounmask
	def autounmask_breakage_detected(self):
		"""Return whether redisplaying the recorded unsatisfied deps with
		check_autounmask_breakage=True triggers the _autounmask_breakage
		exception (i.e. a package was disqualified by autounmask changes)."""
			for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
				self._show_unsatisfied_dep(
					*pargs, check_autounmask_breakage=True, **kwargs)
		except self._autounmask_breakage:
7111 def get_backtrack_infos(self):
7112 return self._dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	"""
	def __init__(self, depgraph, root):
		dbapi.__init__(self)
		self._depgraph = depgraph
		# Caches, invalidated via _clear_cache() when graph state changes.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Drop memoized match results and cpv -> Package mappings.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def cp_list(self, cp):
		"""
		Emulate cp_list just so it can be used to check for existence
		of new-style virtuals. Since it's a waste of time to return
		more than one cpv for this use case, a maximum of one cpv will
		"""
		if isinstance(cp, Atom):
		for pkg in self._depgraph._iter_match_pkgs_any(
			self._depgraph._frozen_config.roots[self._root], atom):

	def match(self, atom):
		# Memoize on both the evaluated and the unevaluated atom.
		cache_key = (atom, atom.unevaluated_atom)
		ret = self._match_cache.get(cache_key)
		pkg, existing = self._depgraph._select_package(self._root, atom)

		if pkg is not None and self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg

		if pkg is not None and \
			atom.slot is None and \
			pkg.cp.startswith("virtual/") and \
			(("remove" not in self._depgraph._dynamic_config.myparams and
			"--update" not in self._depgraph._frozen_config.myopts) or
			not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
			# For new-style virtual lookahead that occurs inside dep_check()
			# for bug #141118, examine all slots. This is needed so that newer
			# slots will not unnecessarily be pulled in when a satisfying lower
			# slot is already installed. For example, if virtual/jdk-1.5 is
			# satisfied via gcj-jdk then there's no need to pull in a newer
			# slot to satisfy a virtual/jdk dependency, unless --update is
			for virt_pkg in self._depgraph._iter_match_pkgs_any(
				self._depgraph._frozen_config.roots[self._root], atom):
				if virt_pkg.cp != pkg.cp:
				slots.add(virt_pkg.slot)

			# The slot of the already-selected pkg was handled above.
			slots.remove(pkg.slot)
				slot_atom = atom.with_slot(slots.pop())
				pkg, existing = self._depgraph._select_package(
					self._root, slot_atom)
				if not self._visible(pkg):
				self._cpv_pkg_map[pkg.cpv] = pkg

			self._cpv_sort_ascending(ret)

		self._match_cache[cache_key] = ret

	def _visible(self, pkg):
		# Hide installed packages that the depgraph would not keep.
		if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
		if pkg.installed and \
			(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
			# Account for packages with masks (like KEYWORDS masks)
			# that are usually ignored in visibility checks for
			# installed packages, in order to handle cases like
			myopts = self._depgraph._frozen_config.myopts
			use_ebuild_visibility = myopts.get(
				'--use-ebuild-visibility', 'n') != 'n'
			avoid_update = "--update" not in myopts and \
				"remove" not in self._depgraph._dynamic_config.myparams
			usepkgonly = "--usepkgonly" in myopts
			if not avoid_update:
				if not use_ebuild_visibility and usepkgonly:
				elif not self._depgraph._equiv_ebuild_visible(pkg):

		in_graph = self._depgraph._dynamic_config._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			# Note: highest_visible is not necessarily the real highest
			# visible, especially when --update is not enabled, so use
			# < operator instead of !=.
			if highest_visible is not None and pkg < highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.

	def aux_get(self, cpv, wants):
		# Serve metadata from packages previously cached by match();
		# missing keys default to an empty string.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def match_pkgs(self, atom):
		# Map matched cpvs back to their Package instances.
		return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Report that the short name 'arg' matches multiple packages and list
	the fully-qualified names (cp) the user may have meant."""
	if "--quiet" in myopts:
		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)

	# In non-quiet mode, reuse the search machinery to display full
	# details for each candidate package.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def _spinner_start(spinner, myopts):
	"""Start the dependency-calculation spinner, or print a banner
	describing what the displayed package list would mean in
	pretend/ask/tree/verbose modes."""
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		# Pick the verb matching the requested action.
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:

		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			if "--unordered-display" in myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s:" % action) + "\n\n")
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s, in reverse order:" % action) + "\n\n")
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be %s, in order:" % action) + "\n\n")

	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		# Silence the spinner for quiet/--nodeps runs.
		spinner.update = spinner.update_quiet

		portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
	"""Stop the spinner and print the closing "... done!" message,
	unless the spinner was absent or silenced."""
	if spinner is None or \
		spinner.update == spinner.update_quiet:
	if spinner.update != spinner.update_basic:
		# update_basic is used for non-tty output,
		# so don't output backspaces in that case.
		portage.writemsg_stdout("\b\b")
	portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# Wrap the real work with spinner start/stop handling.
	_spinner_start(spinner, myopts)
		return _backtrack_depgraph(settings, trees, myopts, myparams,
			myaction, myfiles, spinner)
		_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
	"""Repeatedly run depgraph calculations, feeding failure info back
	into a Backtracker, until success or the retry budget is exhausted.
	Returns (success, mydepgraph, favorites)."""
	debug = "--debug" in myopts
	max_retries = myopts.get('--backtrack', 10)
	# NOTE(review): under Python 3, "/" yields a float here; floor
	# division ("//") appears to be intended for a depth limit — confirm.
	max_depth = max(1, (max_retries + 1) / 2)
	allow_backtracking = max_retries > 0
	backtracker = Backtracker(max_depth)

	frozen_config = _frozen_depgraph_config(settings, trees,

		if debug and mydepgraph is not None:
				"\n\nbacktracking try %s \n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		backtrack_parameters = backtracker.get()

		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=allow_backtracking,
			backtrack_parameters=backtrack_parameters)
		success, favorites = mydepgraph.select_files(myfiles)

		if success or mydepgraph.success_without_autounmask():
		elif not allow_backtracking:
		elif backtracked >= max_retries:
		elif mydepgraph.need_restart():
			backtracker.feedback(mydepgraph.get_backtrack_infos())

	if not (success or mydepgraph.success_without_autounmask()) and backtracked:

				"\n\nbacktracking aborted after %s tries\n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		# Backtracking gave up: redo the calculation with the best
		# parameters found so far and backtracking disabled.
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=False,
			backtrack_parameters=backtracker.get_best_run())
		success, favorites = mydepgraph.select_files(myfiles)

	if not success and mydepgraph.autounmask_breakage_detected():
				"\n\nautounmask breakage detected\n\n",
				noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()
		# Final fallback: recalculate with autounmask disabled.
		myopts["--autounmask"] = "n"
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config, allow_backtracking=False)
		success, favorites = mydepgraph.select_files(myfiles)

	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# Wrap the real work with spinner start/stop handling.
	_spinner_start(spinner, myopts)
		return _resume_depgraph(settings, trees, mtimedb, myopts,
		_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.
	@return: (success, depgraph, dropped_tasks)
	"""
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	frozen_config = _frozen_depgraph_config(settings, trees,
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner, frozen_config=frozen_config)
			success = mydepgraph._loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep as e:
			if not skip_unsatisfied:

			# Drop the packages with unsatisfied deps, plus everything
			# that (transitively) pulled them in, then retry.
			graph = mydepgraph._dynamic_config.digraph
			unsatisfied_parents = dict((dep.parent, dep.parent) \
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				pkg = unsatisfied_stack.pop()
				if pkg in traversed_nodes:
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(pkg):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
					# We need to traverse all priorities here, in order to
					# ensure that a package with an unsatisfied dependency
					# won't get pulled in, even indirectly via a soft
					unsatisfied_parents[parent_node] = parent_node
					unsatisfied_stack.append(parent_node)

			unsatisfied_tuples = frozenset(tuple(parent_node)
				for parent_node in unsatisfied_parents
				if isinstance(parent_node, Package))
			pruned_mergelist = []
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_tuples:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update(pkg for pkg in \
				unsatisfied_parents if pkg.operation != "nomerge")

			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack

	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
	"""Return (metadata, mreasons) for cpv: its metadata dict (or None
	when it cannot be fetched from db) and a list of mask reason strings."""
		metadata = dict(zip(db_keys,
			db.aux_get(cpv, db_keys, myrepo=myrepo)))
	if metadata is None:
		# Metadata could not be fetched for this cpv.
		mreasons = ["corruption"]
		eapi = metadata['EAPI']
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)

			if _pkg_use_enabled is not None:
				modified_use = _pkg_use_enabled(pkg)

			mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)

	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print mask reasons for each (root_config, pkgsettings, cpv, repo,
	metadata, mreasons) tuple, including package.mask comments and the
	locations of missing license files. Returns True if any package was
	masked due to an unsupported EAPI."""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv, repo,
		metadata, mreasons) in masked_packages:
			output_cpv += _repo_separator + repo
		if output_cpv in shown_cpvs:
		shown_cpvs.add(output_cpv)
		eapi_masked = metadata is not None and \
			not portage.eapi_is_supported(metadata["EAPI"])
			have_eapi_mask = True
			# When masked by EAPI, metadata is mostly useless since
			# it doesn't contain essential things like SLOT.
		comment, filename = None, None
		if not eapi_masked and \
			"package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not eapi_masked and metadata is not None:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.

		writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",

		if comment and comment not in shown_comments:
			writemsg(filename + ":\n" + comment + "\n",
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation about masked packages."""
	doc_pointer = (
		"For more information, see the MASKED PACKAGES "
		"section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for line in doc_pointer:
		writemsg(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a link to the handbook section about blocked packages."""
	messages = (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for line in messages:
		writemsg(line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return a list of human-readable mask reason strings for pkg."""
	messages = []
	for mreason in _get_masking_status(pkg, pkgsettings,
		root_config, myrepo=myrepo, use=use):
		messages.append(mreason.message)
	return messages
7594 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7595 mreasons = _getmaskingstatus(
7596 pkg, settings=pkgsettings,
7597 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7599 if not pkg.installed:
7600 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
7601 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7602 pkg.metadata["CHOST"]))
7605 for msgs in pkg.invalid.values():
7608 _MaskReason("invalid", "invalid: %s" % (msg,)))
7610 if not pkg.metadata["SLOT"]:
7612 _MaskReason("invalid", "SLOT: undefined"))