1 # Copyright 1999-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
13 from collections import deque
14 from itertools import chain
17 from portage import os, OrderedDict
18 from portage import _unicode_decode, _unicode_encode, _encodings
19 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
20 from portage.dbapi import dbapi
21 from portage.dbapi.dep_expand import dep_expand
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.dep._slot_operator import ignore_built_slot_operator_deps
26 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
27 from portage.exception import (InvalidAtom, InvalidDependString,
28 PackageNotFound, PortageException)
29 from portage.output import colorize, create_color_func, \
31 bad = create_color_func("BAD")
32 from portage.package.ebuild.getmaskingstatus import \
33 _getmaskingstatus, _MaskReason
34 from portage._sets import SETPREFIX
35 from portage._sets.base import InternalPackageSet
36 from portage.util import ConfigProtect, shlex_split, new_protect_filename
37 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
38 from portage.util import ensure_dirs
39 from portage.util import writemsg_level, write_atomic
40 from portage.util.digraph import digraph
41 from portage.util.listdir import _ignorecvs_dirs
42 from portage.versions import catpkgsplit
44 from _emerge.AtomArg import AtomArg
45 from _emerge.Blocker import Blocker
46 from _emerge.BlockerCache import BlockerCache
47 from _emerge.BlockerDepPriority import BlockerDepPriority
48 from _emerge.countdown import countdown
49 from _emerge.create_world_atom import create_world_atom
50 from _emerge.Dependency import Dependency
51 from _emerge.DependencyArg import DependencyArg
52 from _emerge.DepPriority import DepPriority
53 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
54 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
55 from _emerge.FakeVartree import FakeVartree
56 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
57 from _emerge.is_valid_package_atom import insert_category_into_atom, \
59 from _emerge.Package import Package
60 from _emerge.PackageArg import PackageArg
61 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
62 from _emerge.RootConfig import RootConfig
63 from _emerge.search import search
64 from _emerge.SetArg import SetArg
65 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
66 from _emerge.UnmergeDepPriority import UnmergeDepPriority
67 from _emerge.UseFlagDisplay import pkg_use_display
68 from _emerge.userquery import userquery
70 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
71 from _emerge.resolver.slot_collision import slot_conflict_handler
72 from _emerge.resolver.circular_dependency import circular_dependency_handler
73 from _emerge.resolver.output import Display
75 if sys.hexversion >= 0x3000000:
82 class _scheduler_graph_config(object):
83 def __init__(self, trees, pkg_cache, graph, mergelist):
85 self.pkg_cache = pkg_cache
87 self.mergelist = mergelist
89 def _wildcard_set(atoms):
90 pkgs = InternalPackageSet(allow_wildcard=True)
93 x = Atom(x, allow_wildcard=True, allow_repo=False)
94 except portage.exception.InvalidAtom:
95 x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
99 class _frozen_depgraph_config(object):
101 def __init__(self, settings, trees, myopts, spinner):
102 self.settings = settings
103 self.target_root = settings["EROOT"]
106 if settings.get("PORTAGE_DEBUG", "") == "1":
108 self.spinner = spinner
109 self._running_root = trees[trees._running_eroot]["root_config"]
110 self.pkgsettings = {}
112 self._trees_orig = trees
114 # All Package instances
116 self._highest_license_masked = {}
117 dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
118 ignore_built_slot_operator_deps = myopts.get(
119 "--ignore-built-slot-operator-deps", "n") == "y"
121 self.trees[myroot] = {}
122 # Create a RootConfig instance that references
123 # the FakeVartree instead of the real one.
124 self.roots[myroot] = RootConfig(
125 trees[myroot]["vartree"].settings,
127 trees[myroot]["root_config"].setconfig)
128 for tree in ("porttree", "bintree"):
129 self.trees[myroot][tree] = trees[myroot][tree]
130 self.trees[myroot]["vartree"] = \
131 FakeVartree(trees[myroot]["root_config"],
132 pkg_cache=self._pkg_cache,
133 pkg_root_config=self.roots[myroot],
134 dynamic_deps=dynamic_deps,
135 ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
136 self.pkgsettings[myroot] = portage.config(
137 clone=self.trees[myroot]["vartree"].settings)
139 self._required_set_names = set(["world"])
141 atoms = ' '.join(myopts.get("--exclude", [])).split()
142 self.excluded_pkgs = _wildcard_set(atoms)
143 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
144 self.reinstall_atoms = _wildcard_set(atoms)
145 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
146 self.usepkg_exclude = _wildcard_set(atoms)
147 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
148 self.useoldpkg_atoms = _wildcard_set(atoms)
149 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
150 self.rebuild_exclude = _wildcard_set(atoms)
151 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
152 self.rebuild_ignore = _wildcard_set(atoms)
154 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
155 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
156 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
158 class _depgraph_sets(object):
160 # contains all sets added to the graph
162 # contains non-set atoms given as arguments
163 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
164 # contains all atoms from all sets added to the graph, including
165 # atoms given as arguments
166 self.atoms = InternalPackageSet(allow_repo=True)
167 self.atom_arg_map = {}
169 class _rebuild_config(object):
def __init__(self, frozen_config, backtrack_parameters):
	"""Track which packages must be rebuilt or reinstalled.

	@param frozen_config: the shared _frozen_depgraph_config holding
		the --rebuild-if-new-rev / --rebuild-if-new-ver /
		--rebuild-if-unbuilt option settings
	@param backtrack_parameters: carries rebuild/reinstall state over
		from previous backtracking runs
	"""
	self._graph = digraph()
	self._frozen_config = frozen_config
	# Copy the backtracker's sets so that this run does not mutate
	# the caller's backtracking state in place.
	self.rebuild_list = backtrack_parameters.rebuild_list.copy()
	self.orig_rebuild_list = self.rebuild_list.copy()
	self.reinstall_list = backtrack_parameters.reinstall_list.copy()
	self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
	self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
	self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
	# True when any of the rebuild trigger options is active.
	self.rebuild = any((self.rebuild_if_new_rev,
		self.rebuild_if_new_ver, self.rebuild_if_unbuilt))
def add(self, dep_pkg, dep):
	"""Record a build-time dependency edge for rebuild tracking.

	An edge from dep_pkg to its collapsed parent is added to the
	rebuild graph only when rebuild tracking is active, the parent is
	a built Package depending on dep_pkg at build time, and neither
	endpoint matches the --rebuild-exclude / --rebuild-ignore atoms.
	"""
	parent = dep.collapsed_parent
	priority = dep.collapsed_priority
	rebuild_exclude = self._frozen_config.rebuild_exclude
	rebuild_ignore = self._frozen_config.rebuild_ignore
	# Guard clauses replace the original single compound condition;
	# evaluation order of each term is preserved.
	if not (self.rebuild and isinstance(parent, Package)
		and parent.built and priority.buildtime
		and isinstance(dep_pkg, Package)):
		return
	if rebuild_exclude.findAtomForPackage(parent):
		return
	if rebuild_ignore.findAtomForPackage(dep_pkg):
		return
	self._graph.add(dep_pkg, parent, priority)
194 def _needs_rebuild(self, dep_pkg):
195 """Check whether packages that depend on dep_pkg need to be rebuilt."""
196 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
197 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
200 if self.rebuild_if_unbuilt:
201 # dep_pkg is being installed from source, so binary
202 # packages for parents are invalid. Force rebuild
205 trees = self._frozen_config.trees
206 vardb = trees[dep_pkg.root]["vartree"].dbapi
207 if self.rebuild_if_new_rev:
208 # Parent packages are valid if a package with the same
209 # cpv is already installed.
210 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
212 # Otherwise, parent packages are valid if a package with the same
213 # version (excluding revision) is already installed.
214 assert self.rebuild_if_new_ver
215 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
216 for inst_cpv in vardb.match(dep_pkg.slot_atom):
217 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
218 if inst_cpv_norev == cpv_norev:
223 def _trigger_rebuild(self, parent, build_deps):
224 root_slot = (parent.root, parent.slot_atom)
225 if root_slot in self.rebuild_list:
227 trees = self._frozen_config.trees
229 for slot_atom, dep_pkg in build_deps.items():
230 dep_root_slot = (dep_pkg.root, slot_atom)
231 if self._needs_rebuild(dep_pkg):
232 self.rebuild_list.add(root_slot)
234 elif ("--usepkg" in self._frozen_config.myopts and
235 (dep_root_slot in self.reinstall_list or
236 dep_root_slot in self.rebuild_list or
237 not dep_pkg.installed)):
239 # A direct rebuild dependency is being installed. We
240 # should update the parent as well to the latest binary,
241 # if that binary is valid.
243 # To validate the binary, we check whether all of the
244 # rebuild dependencies are present on the same binhost.
246 # 1) If parent is present on the binhost, but one of its
247 # rebuild dependencies is not, then the parent should
248 # be rebuilt from source.
249 # 2) Otherwise, the parent binary is assumed to be valid,
250 # because all of its rebuild dependencies are
252 bintree = trees[parent.root]["bintree"]
253 uri = bintree.get_pkgindex_uri(parent.cpv)
254 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
255 bindb = bintree.dbapi
256 if self.rebuild_if_new_ver and uri and uri != dep_uri:
257 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
258 for cpv in bindb.match(dep_pkg.slot_atom):
259 if cpv_norev == catpkgsplit(cpv)[:-1]:
260 dep_uri = bintree.get_pkgindex_uri(cpv)
263 if uri and uri != dep_uri:
264 # 1) Remote binary package is invalid because it was
265 # built without dep_pkg. Force rebuild.
266 self.rebuild_list.add(root_slot)
268 elif (parent.installed and
269 root_slot not in self.reinstall_list):
270 inst_build_time = parent.metadata.get("BUILD_TIME")
272 bin_build_time, = bindb.aux_get(parent.cpv,
276 if bin_build_time != inst_build_time:
277 # 2) Remote binary package is valid, and local package
278 # is not up to date. Force reinstall.
281 self.reinstall_list.add(root_slot)
284 def trigger_rebuilds(self):
286 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
287 depends on pkgA at both build-time and run-time, pkgB needs to be
294 leaf_nodes = deque(graph.leaf_nodes())
296 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
297 # will always know which children are being rebuilt.
300 # We'll have to drop an edge. This should be quite rare.
301 leaf_nodes.append(graph.order[-1])
303 node = leaf_nodes.popleft()
304 if node not in graph:
305 # This can be triggered by circular dependencies.
307 slot_atom = node.slot_atom
309 # Remove our leaf node from the graph, keeping track of deps.
310 parents = graph.parent_nodes(node)
312 node_build_deps = build_deps.get(node, {})
313 for parent in parents:
315 # Ignore a direct cycle.
317 parent_bdeps = build_deps.setdefault(parent, {})
318 parent_bdeps[slot_atom] = node
319 if not graph.child_nodes(parent):
320 leaf_nodes.append(parent)
322 # Trigger rebuilds for our leaf node. Because all of our children
323 # have been processed, the build_deps will be completely filled in,
324 # and self.rebuild_list / self.reinstall_list will tell us whether
325 # any of our children need to be rebuilt or reinstalled.
326 if self._trigger_rebuild(node, node_build_deps):
332 class _dynamic_depgraph_config(object):
334 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
335 self.myparams = myparams.copy()
336 self._vdb_loaded = False
337 self._allow_backtracking = allow_backtracking
338 # Maps slot atom to package for each Package added to the graph.
339 self._slot_pkg_map = {}
340 # Maps nodes to the reasons they were selected for reinstallation.
341 self._reinstall_nodes = {}
343 # Contains a filtered view of preferred packages that are selected
344 # from available repositories.
345 self._filtered_trees = {}
346 # Contains installed packages and new packages that have been added
348 self._graph_trees = {}
349 # Caches visible packages returned from _select_package, for use in
350 # depgraph._iter_atoms_for_pkg() SLOT logic.
351 self._visible_pkgs = {}
352 #contains the args created by select_files
353 self._initial_arg_list = []
354 self.digraph = portage.digraph()
355 # manages sets added to the graph
357 # contains all nodes pulled in by self.sets
358 self._set_nodes = set()
359 # Contains only Blocker -> Uninstall edges
360 self._blocker_uninstalls = digraph()
361 # Contains only Package -> Blocker edges
362 self._blocker_parents = digraph()
363 # Contains only irrelevant Package -> Blocker edges
364 self._irrelevant_blockers = digraph()
365 # Contains only unsolvable Package -> Blocker edges
366 self._unsolvable_blockers = digraph()
367 # Contains all Blocker -> Blocked Package edges
368 self._blocked_pkgs = digraph()
369 # Contains world packages that have been protected from
370 # uninstallation but may not have been added to the graph
371 # if the graph is not complete yet.
372 self._blocked_world_pkgs = {}
373 # Contains packages whose dependencies have been traversed.
374 # This use used to check if we have accounted for blockers
375 # relevant to a package.
376 self._traversed_pkg_deps = set()
377 # This should be ordered such that the backtracker will
378 # attempt to solve conflicts which occurred earlier first,
379 # since an earlier conflict can be the cause of a conflict
380 # which occurs later.
381 self._slot_collision_info = OrderedDict()
382 # Slot collision nodes are not allowed to block other packages since
383 # blocker validation is only able to account for one package per slot.
384 self._slot_collision_nodes = set()
385 self._parent_atoms = {}
386 self._slot_conflict_handler = None
387 self._circular_dependency_handler = None
388 self._serialized_tasks_cache = None
389 self._scheduler_graph = None
390 self._displayed_list = None
391 self._pprovided_args = []
392 self._missing_args = []
393 self._masked_installed = set()
394 self._masked_license_updates = set()
395 self._unsatisfied_deps_for_display = []
396 self._unsatisfied_blockers_for_display = None
397 self._circular_deps_for_display = None
399 self._dep_disjunctive_stack = []
400 self._unsatisfied_deps = []
401 self._initially_unsatisfied_deps = []
402 self._ignored_deps = []
403 self._highest_pkg_cache = {}
405 # Binary packages that have been rejected because their USE
406 # didn't match the user's config. It maps packages to a set
407 # of flags causing the rejection.
408 self.ignored_binaries = {}
410 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
411 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
412 self._needed_license_changes = backtrack_parameters.needed_license_changes
413 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
414 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
415 self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
416 self._need_restart = False
417 # For conditions that always require user intervention, such as
418 # unsatisfied REQUIRED_USE (currently has no autounmask support).
419 self._skip_restart = False
420 self._backtrack_infos = {}
422 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
423 self._success_without_autounmask = False
424 self._traverse_ignored_deps = False
425 self._complete_mode = False
426 self._slot_operator_deps = {}
428 for myroot in depgraph._frozen_config.trees:
429 self.sets[myroot] = _depgraph_sets()
430 self._slot_pkg_map[myroot] = {}
431 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
432 # This dbapi instance will model the state that the vdb will
433 # have after new packages have been installed.
434 fakedb = PackageVirtualDbapi(vardb.settings)
436 self.mydbapi[myroot] = fakedb
439 graph_tree.dbapi = fakedb
440 self._graph_trees[myroot] = {}
441 self._filtered_trees[myroot] = {}
442 # Substitute the graph tree for the vartree in dep_check() since we
443 # want atom selections to be consistent with package selections
444 # have already been made.
445 self._graph_trees[myroot]["porttree"] = graph_tree
446 self._graph_trees[myroot]["vartree"] = graph_tree
447 self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
448 self._graph_trees[myroot]["graph"] = self.digraph
451 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
452 self._filtered_trees[myroot]["porttree"] = filtered_tree
453 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
455 # Passing in graph_tree as the vartree here could lead to better
456 # atom selections in some cases by causing atoms for packages that
457 # have been added to the graph to be preferred over other choices.
458 # However, it can trigger atom selections that result in
459 # unresolvable direct circular dependencies. For example, this
460 # happens with gwydion-dylan which depends on either itself or
461 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
462 # gwydion-dylan-bin needs to be selected in order to avoid a
463 # an unresolvable direct circular dependency.
465 # To solve the problem described above, pass in "graph_db" so that
466 # packages that have been added to the graph are distinguishable
467 # from other available packages and installed packages. Also, pass
468 # the parent package into self._select_atoms() calls so that
469 # unresolvable direct circular dependencies can be detected and
470 # avoided when possible.
471 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
472 self._filtered_trees[myroot]["graph"] = self.digraph
473 self._filtered_trees[myroot]["vartree"] = \
474 depgraph._frozen_config.trees[myroot]["vartree"]
477 # (db, pkg_type, built, installed, db_keys)
478 if "remove" in self.myparams:
479 # For removal operations, use _dep_check_composite_db
480 # for availability and visibility checks. This provides
481 # consistency with install operations, so we don't
482 # get install/uninstall cycles like in bug #332719.
483 self._graph_trees[myroot]["porttree"] = filtered_tree
485 if "--usepkgonly" not in depgraph._frozen_config.myopts:
486 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
487 db_keys = list(portdb._aux_cache_keys)
488 dbs.append((portdb, "ebuild", False, False, db_keys))
490 if "--usepkg" in depgraph._frozen_config.myopts:
491 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
492 db_keys = list(bindb._aux_cache_keys)
493 dbs.append((bindb, "binary", True, False, db_keys))
495 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
496 db_keys = list(depgraph._frozen_config._trees_orig[myroot
497 ]["vartree"].dbapi._aux_cache_keys)
498 dbs.append((vardb, "installed", True, True, db_keys))
499 self._filtered_trees[myroot]["dbs"] = dbs
501 class depgraph(object):
503 pkg_tree_map = RootConfig.pkg_tree_map
505 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
507 def __init__(self, settings, trees, myopts, myparams, spinner,
508 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
509 if frozen_config is None:
510 frozen_config = _frozen_depgraph_config(settings, trees,
512 self._frozen_config = frozen_config
513 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
514 allow_backtracking, backtrack_parameters)
515 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
517 self._select_atoms = self._select_atoms_highest_available
518 self._select_package = self._select_pkg_highest_available
522 Load installed package metadata if appropriate. This used to be called
523 from the constructor, but that wasn't very nice since this procedure
524 is slow and it generates spinner output. So, now it's called on-demand
525 by various methods when necessary.
528 if self._dynamic_config._vdb_loaded:
531 for myroot in self._frozen_config.trees:
533 dynamic_deps = self._dynamic_config.myparams.get(
534 "dynamic_deps", "y") != "n"
535 preload_installed_pkgs = \
536 "--nodeps" not in self._frozen_config.myopts
538 if self._frozen_config.myopts.get("--root-deps") is not None and \
539 myroot != self._frozen_config.target_root:
542 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
543 if not fake_vartree.dbapi:
544 # This needs to be called for the first depgraph, but not for
545 # backtracking depgraphs that share the same frozen_config.
548 # FakeVartree.sync() populates virtuals, and we want
549 # self.pkgsettings to have them populated too.
550 self._frozen_config.pkgsettings[myroot] = \
551 portage.config(clone=fake_vartree.settings)
553 if preload_installed_pkgs:
554 vardb = fake_vartree.dbapi
555 fakedb = self._dynamic_config._graph_trees[
556 myroot]["vartree"].dbapi
559 self._spinner_update()
561 # This causes FakeVartree to update the
562 # Package instance dependencies via
563 # PackageVirtualDbapi.aux_update()
564 vardb.aux_get(pkg.cpv, [])
565 fakedb.cpv_inject(pkg)
567 self._dynamic_config._vdb_loaded = True
569 def _spinner_update(self):
570 if self._frozen_config.spinner:
571 self._frozen_config.spinner.update()
573 def _show_ignored_binaries(self):
575 Show binaries that have been ignored because their USE didn't
576 match the user's config.
578 if not self._dynamic_config.ignored_binaries \
579 or '--quiet' in self._frozen_config.myopts \
580 or self._dynamic_config.myparams.get(
581 "binpkg_respect_use") in ("y", "n"):
584 for pkg in list(self._dynamic_config.ignored_binaries):
586 selected_pkg = self._dynamic_config.mydbapi[pkg.root
587 ].match_pkgs(pkg.slot_atom)
592 selected_pkg = selected_pkg[-1]
593 if selected_pkg > pkg:
594 self._dynamic_config.ignored_binaries.pop(pkg)
597 if selected_pkg.installed and \
598 selected_pkg.cpv == pkg.cpv and \
599 selected_pkg.metadata.get('BUILD_TIME') == \
600 pkg.metadata.get('BUILD_TIME'):
601 # We don't care about ignored binaries when an
602 # identical installed instance is selected to
604 self._dynamic_config.ignored_binaries.pop(pkg)
607 if not self._dynamic_config.ignored_binaries:
610 self._show_merge_list()
612 writemsg("\n!!! The following binary packages have been ignored " + \
613 "due to non matching USE:\n\n", noiselevel=-1)
615 for pkg, flags in self._dynamic_config.ignored_binaries.items():
617 for flag in sorted(flags):
618 if flag not in pkg.use.enabled:
620 flag_display.append(flag)
621 flag_display = " ".join(flag_display)
622 # The user can paste this line into package.use
623 writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
624 if pkg.root_config.settings["ROOT"] != "/":
625 writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
626 writemsg("\n", noiselevel=-1)
630 "NOTE: The --binpkg-respect-use=n option will prevent emerge",
631 " from ignoring these binary packages if possible.",
632 " Using --binpkg-respect-use=y will silence this warning."
637 line = colorize("INFORM", line)
638 writemsg(line + "\n", noiselevel=-1)
640 def _show_missed_update(self):
642 # In order to minimize noise, show only the highest
643 # missed update from each SLOT.
645 for pkg, mask_reasons in \
646 self._dynamic_config._runtime_pkg_mask.items():
648 # Exclude installed here since we only
649 # want to show available updates.
651 chosen_pkg = self._dynamic_config.mydbapi[pkg.root
652 ].match_pkgs(pkg.slot_atom)
653 if not chosen_pkg or chosen_pkg[-1] >= pkg:
655 k = (pkg.root, pkg.slot_atom)
656 if k in missed_updates:
657 other_pkg, mask_type, parent_atoms = missed_updates[k]
660 for mask_type, parent_atoms in mask_reasons.items():
663 missed_updates[k] = (pkg, mask_type, parent_atoms)
666 if not missed_updates:
669 missed_update_types = {}
670 for pkg, mask_type, parent_atoms in missed_updates.values():
671 missed_update_types.setdefault(mask_type,
672 []).append((pkg, parent_atoms))
674 if '--quiet' in self._frozen_config.myopts and \
675 '--debug' not in self._frozen_config.myopts:
676 missed_update_types.pop("slot conflict", None)
677 missed_update_types.pop("missing dependency", None)
679 self._show_missed_update_slot_conflicts(
680 missed_update_types.get("slot conflict"))
682 self._show_missed_update_unsatisfied_dep(
683 missed_update_types.get("missing dependency"))
685 def _show_missed_update_unsatisfied_dep(self, missed_updates):
687 if not missed_updates:
690 self._show_merge_list()
691 backtrack_masked = []
693 for pkg, parent_atoms in missed_updates:
696 for parent, root, atom in parent_atoms:
697 self._show_unsatisfied_dep(root, atom, myparent=parent,
698 check_backtrack=True)
699 except self._backtrack_mask:
700 # This is displayed below in abbreviated form.
701 backtrack_masked.append((pkg, parent_atoms))
704 writemsg("\n!!! The following update has been skipped " + \
705 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
707 writemsg(str(pkg.slot_atom), noiselevel=-1)
708 if pkg.root_config.settings["ROOT"] != "/":
709 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
710 writemsg("\n", noiselevel=-1)
712 for parent, root, atom in parent_atoms:
713 self._show_unsatisfied_dep(root, atom, myparent=parent)
714 writemsg("\n", noiselevel=-1)
717 # These are shown in abbreviated form, in order to avoid terminal
718 # flooding from mask messages as reported in bug #285832.
719 writemsg("\n!!! The following update(s) have been skipped " + \
720 "due to unsatisfied dependencies\n" + \
721 "!!! triggered by backtracking:\n\n", noiselevel=-1)
722 for pkg, parent_atoms in backtrack_masked:
723 writemsg(str(pkg.slot_atom), noiselevel=-1)
724 if pkg.root_config.settings["ROOT"] != "/":
725 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
726 writemsg("\n", noiselevel=-1)
728 def _show_missed_update_slot_conflicts(self, missed_updates):
730 if not missed_updates:
733 self._show_merge_list()
735 msg.append("\nWARNING: One or more updates have been " + \
736 "skipped due to a dependency conflict:\n\n")
739 for pkg, parent_atoms in missed_updates:
740 msg.append(str(pkg.slot_atom))
741 if pkg.root_config.settings["ROOT"] != "/":
742 msg.append(" for %s" % (pkg.root,))
745 for parent, atom in parent_atoms:
749 msg.append(" conflicts with\n")
751 if isinstance(parent,
752 (PackageArg, AtomArg)):
753 # For PackageArg and AtomArg types, it's
754 # redundant to display the atom attribute.
755 msg.append(str(parent))
757 # Display the specific atom from SetArg or
759 msg.append("%s required by %s" % (atom, parent))
763 writemsg("".join(msg), noiselevel=-1)
765 def _show_slot_collision_notice(self):
766 """Show an informational message advising the user to mask one of the
767 the packages. In some cases it may be possible to resolve this
768 automatically, but support for backtracking (removal nodes that have
769 already been selected) will be required in order to handle all possible
773 if not self._dynamic_config._slot_collision_info:
776 self._show_merge_list()
778 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
779 handler = self._dynamic_config._slot_conflict_handler
781 conflict = handler.get_conflict()
782 writemsg(conflict, noiselevel=-1)
784 explanation = handler.get_explanation()
786 writemsg(explanation, noiselevel=-1)
789 if "--quiet" in self._frozen_config.myopts:
793 msg.append("It may be possible to solve this problem ")
794 msg.append("by using package.mask to prevent one of ")
795 msg.append("those packages from being selected. ")
796 msg.append("However, it is also possible that conflicting ")
797 msg.append("dependencies exist such that they are impossible to ")
798 msg.append("satisfy simultaneously. If such a conflict exists in ")
799 msg.append("the dependencies of two different packages, then those ")
800 msg.append("packages can not be installed simultaneously.")
801 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
802 if not self._dynamic_config._allow_backtracking and \
803 (backtrack_opt is None or \
804 (backtrack_opt > 0 and backtrack_opt < 30)):
805 msg.append(" You may want to try a larger value of the ")
806 msg.append("--backtrack option, such as --backtrack=30, ")
807 msg.append("in order to see if that will solve this conflict ")
808 msg.append("automatically.")
810 for line in textwrap.wrap(''.join(msg), 70):
811 writemsg(line + '\n', noiselevel=-1)
812 writemsg('\n', noiselevel=-1)
815 msg.append("For more information, see MASKED PACKAGES ")
816 msg.append("section in the emerge man page or refer ")
817 msg.append("to the Gentoo Handbook.")
818 for line in textwrap.wrap(''.join(msg), 70):
819 writemsg(line + '\n', noiselevel=-1)
820 writemsg('\n', noiselevel=-1)
822 def _process_slot_conflicts(self):
824 If there are any slot conflicts and backtracking is enabled,
825 _complete_graph should complete the graph before this method
826 is called, so that all relevant reverse dependencies are
827 available for use in backtracking decisions.
829 for (slot_atom, root), slot_nodes in \
830 self._dynamic_config._slot_collision_info.items():
831 self._process_slot_conflict(root, slot_atom, slot_nodes)
833 def _process_slot_conflict(self, root, slot_atom, slot_nodes):
835 Process slot conflict data to identify specific atoms which
836 lead to conflict. These atoms only match a subset of the
837 packages that have been pulled into a given slot.
840 debug = "--debug" in self._frozen_config.myopts
842 slot_parent_atoms = set()
843 for pkg in slot_nodes:
844 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
847 slot_parent_atoms.update(parent_atoms)
851 for pkg in slot_nodes:
853 if self._dynamic_config._allow_backtracking and \
854 pkg in self._dynamic_config._runtime_pkg_mask:
857 "!!! backtracking loop detected: %s %s\n" % \
859 self._dynamic_config._runtime_pkg_mask[pkg]),
860 level=logging.DEBUG, noiselevel=-1)
862 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
863 if parent_atoms is None:
865 self._dynamic_config._parent_atoms[pkg] = parent_atoms
868 for parent_atom in slot_parent_atoms:
869 if parent_atom in parent_atoms:
871 # Use package set for matching since it will match via
872 # PROVIDE when necessary, while match_from_list does not.
873 parent, atom = parent_atom
874 atom_set = InternalPackageSet(
875 initial_atoms=(atom,), allow_repo=True)
876 if atom_set.findAtomForPackage(pkg,
877 modified_use=self._pkg_use_enabled(pkg)):
878 parent_atoms.add(parent_atom)
881 conflict_atoms.setdefault(parent_atom, set()).add(pkg)
884 conflict_pkgs.append(pkg)
886 if conflict_pkgs and \
887 self._dynamic_config._allow_backtracking and \
888 not self._accept_blocker_conflicts():
890 for pkg in conflict_pkgs:
891 if self._slot_conflict_backtrack_abi(pkg,
892 slot_nodes, conflict_atoms):
893 backtrack_infos = self._dynamic_config._backtrack_infos
894 config = backtrack_infos.setdefault("config", {})
895 config.setdefault("slot_conflict_abi", set()).add(pkg)
897 remaining.append(pkg)
899 self._slot_confict_backtrack(root, slot_atom,
900 slot_parent_atoms, remaining)
def _slot_confict_backtrack(self, root, slot_atom,
    all_parents, conflict_pkgs):
    """
    Register a backtracking choice for a slot conflict: for each
    conflicting package, record the parent atoms that would be left
    unmatched if that package were masked, then flag the resolver for
    a restart so the backtracker can explore the alternatives.

    @param root: root (EROOT) of the conflicting slot
    @param slot_atom: slot atom the packages collide on
    @param all_parents: all (parent, atom) pairs involved in the conflict
    @param conflict_pkgs: candidate packages, one of which will be masked
    """
    debug = "--debug" in self._frozen_config.myopts
    existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
    # NOTE(review): "backtrack_data = []" was missing from the reviewed
    # copy of this file; restored here since it is appended to below.
    backtrack_data = []
    # The ordering of backtrack_data can make
    # a difference here, because both mask actions may lead
    # to valid, but different, solutions and the one with
    # 'existing_node' masked is usually the better one. Because
    # of that, we choose an order such that
    # the backtracker will first explore the choice with
    # existing_node masked. The backtracker reverses the
    # order, so the order it uses is the reverse of the
    # order shown here. See bug #339606.
    if existing_node in conflict_pkgs and \
        existing_node is not conflict_pkgs[-1]:
        conflict_pkgs.remove(existing_node)
        conflict_pkgs.append(existing_node)
    for to_be_masked in conflict_pkgs:
        # For missed update messages, find out which
        # atoms matched to_be_selected that did not
        # match to_be_masked.
        parent_atoms = \
            self._dynamic_config._parent_atoms.get(to_be_masked, set())
        conflict_atoms = set(parent_atom for parent_atom in all_parents \
            if parent_atom not in parent_atoms)
        backtrack_data.append((to_be_masked, conflict_atoms))

    if len(backtrack_data) > 1:
        # NOTE: Generally, we prefer to mask the higher
        # version since this solves common cases in which a
        # lower version is needed so that all dependencies
        # will be satisfied (bug #337178). However, if
        # existing_node happens to be installed then we
        # mask that since this is a common case that is
        # triggered when --update is not enabled.
        if existing_node.installed:
            pass
        elif any(pkg > existing_node for pkg in conflict_pkgs):
            backtrack_data.reverse()

    # The backtracker pops the last entry first.
    to_be_masked = backtrack_data[-1][0]

    self._dynamic_config._backtrack_infos.setdefault(
        "slot conflict", []).append(backtrack_data)
    self._dynamic_config._need_restart = True
    if debug:
        msg = []
        msg.append("backtracking due to slot conflict:")
        msg.append(" first package: %s" % existing_node)
        msg.append(" package to mask: %s" % to_be_masked)
        msg.append(" slot: %s" % slot_atom)
        msg.append(" parents: %s" % ", ".join( \
            "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
        writemsg_level("".join("%s\n" % l for l in msg),
            noiselevel=-1, level=logging.DEBUG)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
    """
    If one or more conflict atoms have a slot/sub-slot dep that can be resolved
    by rebuilding the parent package, then schedule the rebuild via
    backtracking, and return True. Otherwise, return False.
    """
    # NOTE(review): the found_update bookkeeping and continue/return
    # statements were missing from the reviewed copy; restored here.
    found_update = False
    for parent_atom, conflict_pkgs in conflict_atoms.items():
        parent, atom = parent_atom
        # Only built parents with := slot-operator deps can be rebuilt
        # to resolve the conflict.
        if atom.slot_operator != "=" or not parent.built:
            continue

        # Only consider parents whose atom is in conflict with pkg.
        if pkg not in conflict_pkgs:
            continue

        for other_pkg in slot_nodes:
            # Skip the conflicting packages themselves; look for a
            # sibling in the slot that the parent could be rebuilt for.
            if other_pkg in conflict_pkgs:
                continue

            dep = Dependency(atom=atom, child=other_pkg,
                parent=parent, root=pkg.root)

            if self._slot_operator_update_probe(dep):
                self._slot_operator_update_backtrack(dep)
                found_update = True

    return found_update
def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
    """
    Schedule, via backtracking, a rebuild/reinstall that allows a
    slot-operator (:=) dependency to be rebuilt against a newer child.

    @param dep: the slot-operator Dependency being updated
    @param new_child_slot: when not None, a package in a new slot that
        should replace dep.child (only the parent is then rebuilt)
    """
    # NOTE(review): the child selection, msg initialization and the
    # abi_masks/abi_reinstalls guards were missing from the reviewed
    # copy; restored here.
    if new_child_slot is None:
        child = dep.child
    else:
        child = new_child_slot
    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("backtracking due to missed slot abi update:")
        msg.append(" child package: %s" % child)
        if new_child_slot is not None:
            msg.append(" new child slot package: %s" % new_child_slot)
        msg.append(" parent package: %s" % dep.parent)
        msg.append(" atom: %s" % dep.atom)
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    abi_masks = {}
    if new_child_slot is None:
        if not child.installed:
            abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
    if not dep.parent.installed:
        abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
    if abi_masks:
        config.setdefault("slot_operator_mask_built", {}).update(abi_masks)

    # trigger replacement of installed packages if necessary
    abi_reinstalls = set()
    if dep.parent.installed:
        abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
    if new_child_slot is None and child.installed:
        abi_reinstalls.add((child.root, child.slot_atom))
    if abi_reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(abi_reinstalls)

    self._dynamic_config._need_restart = True
def _slot_operator_update_probe(self, dep, new_child_slot=False):
    """
    slot/sub-slot := operators tend to prevent updates from getting pulled in,
    since installed packages pull in packages with the slot/sub-slot that they
    were built against. Detect this case so that we can schedule rebuilds
    and reinstalls when appropriate.
    NOTE: This function only searches for updates that involve upgrades
    to higher versions, since the logic required to detect when a
    downgrade would be desirable is not implemented.
    """
    # NOTE(review): this copy of the method appears to be missing lines
    # (several conditionals below have no body, an inner loop header is
    # unterminated, and the return statements are absent). Restore from
    # the upstream source before relying on it.

    # Respect --exclude: never probe an excluded installed child ...
    if dep.child.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
        modified_use=self._pkg_use_enabled(dep.child)):

    # ... or an excluded installed parent.
    if dep.parent.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
        modified_use=self._pkg_use_enabled(dep.parent)):

    debug = "--debug" in self._frozen_config.myopts
    # Tri-state cache for _downgrade_probe: None means "not computed yet".
    want_downgrade = None

    # Consider candidate replacements for the parent in the same slot.
    for replacement_parent in self._iter_similar_available(dep.parent,
        dep.parent.slot_atom):

        for atom in replacement_parent.validated_atoms:
            # Only := deps on the same package as dep.atom are relevant.
            if not atom.slot_operator == "=" or \
                atom.cp != dep.atom.cp:

            # Discard USE deps, we're only searching for an approximate
            # pattern, and dealing with USE states is too complex for
            atom = atom.without_use

            if replacement_parent.built and \
                portage.dep._match_slot(atom, dep.child):
                # Our selected replacement_parent appears to be built
                # for the existing child selection. So, discard this
                # parent and search for another.

            # Search for a child that could satisfy the replacement
            # parent's slot-operator dep (loop header truncated here).
            for pkg in self._iter_similar_available(
                if pkg.slot == dep.child.slot and \
                    pkg.sub_slot == dep.child.sub_slot:
                    # If slot/sub-slot is identical, then there's
                    # no point in updating.

                if pkg.slot == dep.child.slot:

                    # the new slot only matters if the
                    # package version is higher

                if pkg.slot != dep.child.slot:
                    if want_downgrade is None:
                        want_downgrade = self._downgrade_probe(dep.child)
                    # be careful not to trigger a rebuild when
                    # the only version available with a
                    # different slot_operator is an older version
                    if not want_downgrade:

                    # Debug report for the "update found" outcome
                    # (surrounding "if debug:" / msg init truncated).
                    msg.append("slot_operator_update_probe:")
                    msg.append(" existing child package: %s" % dep.child)
                    msg.append(" existing parent package: %s" % dep.parent)
                    msg.append(" new child package: %s" % pkg)
                    msg.append(" new parent package: %s" % replacement_parent)

                    writemsg_level("\n".join(msg),
                        noiselevel=-1, level=logging.DEBUG)

        # Debug report for the "no update found" outcome
        # (surrounding "if debug:" / msg init truncated).
        msg.append("slot_operator_update_probe:")
        msg.append(" existing child package: %s" % dep.child)
        msg.append(" existing parent package: %s" % dep.parent)
        msg.append(" new child package: %s" % None)
        msg.append(" new parent package: %s" % None)

        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
def _downgrade_probe(self, pkg):
    """
    Detect cases where a downgrade of the given package is considered
    desirable due to the current version being masked or unavailable.

    @return: True when only lower versions are available, False otherwise
    """
    # NOTE(review): the loop-header continuation and the early
    # "return False" were missing from the reviewed copy; restored here.
    available_pkg = None
    for available_pkg in self._iter_similar_available(pkg,
        pkg.slot_atom):
        if available_pkg >= pkg:
            # There's an available package of the same or higher
            # version, so downgrade seems undesirable.
            return False

    # True only if at least one (strictly lower) version was seen.
    return available_pkg is not None
def _iter_similar_available(self, graph_pkg, atom):
    """
    Given a package that's in the graph, do a rough check to
    see if a similar package is available to install. The given
    graph_pkg itself may be yielded only if it's not installed.
    """
    # NOTE(review): the per-candidate filter guards ("continue" bodies,
    # "if pkg.installed:"/"if pkg.built:" and the final "yield") were
    # missing from the reviewed copy; restored here.

    usepkgonly = "--usepkgonly" in self._frozen_config.myopts
    useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'

    for pkg in self._iter_match_pkgs_any(
        graph_pkg.root_config, atom):
        if pkg.cp != graph_pkg.cp:
            # discard old-style virtual match
            continue
        # Installed instances can't be "installed" again.
        if pkg.installed:
            continue
        if pkg in self._dynamic_config._runtime_pkg_mask:
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
            continue
        if not self._pkg_visibility_check(pkg):
            continue
        if pkg.built:
            # Skip binaries equivalent to what's already installed.
            if self._equiv_binary_installed(pkg):
                continue
            # Unless binaries are explicitly preferred (--usepkgonly /
            # --useoldpkg), require a visible equivalent ebuild too.
            if not (not use_ebuild_visibility and
                (usepkgonly or useoldpkg_atoms.findAtomForPackage(
                pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
                not self._equiv_ebuild_visible(pkg):
                continue
        yield pkg
def _slot_operator_trigger_reinstalls(self):
    """
    Search for packages with slot-operator deps on older slots, and schedule
    rebuilds if they can link to a newer slot that's in the graph.
    """
    # NOTE(review): the "continue"/"break" statements and the new_child
    # guard were missing from the reviewed copy; restored here.

    rebuild_if_new_slot = self._dynamic_config.myparams.get(
        "rebuild_if_new_slot", "y") == "y"

    for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():

        for dep in slot_info:
            # Only built parent/child pairs are rebuild candidates.
            if not (dep.child.built and dep.parent and
                isinstance(dep.parent, Package) and dep.parent.built):
                continue

            # Check for slot update first, since we don't want to
            # trigger reinstall of the child package when a newer
            # slot will be used instead.
            if rebuild_if_new_slot:
                new_child = self._slot_operator_update_probe(dep,
                    new_child_slot=True)
                if new_child:
                    self._slot_operator_update_backtrack(dep,
                        new_child_slot=new_child)
                    break

            if self._slot_operator_update_probe(dep):
                self._slot_operator_update_backtrack(dep)
                break
def _reinstall_for_flags(self, pkg, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""
    # NOTE(review): the binpkg_respect_use continuation and the
    # "if flags: return flags" / final "return None" statements were
    # missing from the reviewed copy; restored here.

    # binpkg_respect_use: Behave like newuse by default. If newuse is
    # False and changed_use is True, then behave like changed_use.
    binpkg_respect_use = (pkg.built and
        self._dynamic_config.myparams.get("binpkg_respect_use")
        in ("y", "auto"))
    newuse = "--newuse" in self._frozen_config.myopts
    changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")

    if newuse or (binpkg_respect_use and not changed_use):
        # Changed IUSE (minus forced flags) plus changed enabled state.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
        if flags:
            return flags

    elif changed_use or binpkg_respect_use:
        # Only changed enabled state matters for changed-use.
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
        if flags:
            return flags

    return None
def _create_graph(self, allow_unsatisfied=False):
    """
    Drain the dependency stacks, adding packages and deps to the graph.

    @return: 1 on success, 0 when a dependency could not be added
    """
    # NOTE(review): the inner "while dep_stack:" loop and the
    # "return 0"/"continue"/"return 1" statements were missing from the
    # reviewed copy; restored here.
    dep_stack = self._dynamic_config._dep_stack
    dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
    while dep_stack or dep_disjunctive_stack:
        self._spinner_update()
        while dep_stack:
            dep = dep_stack.pop()
            if isinstance(dep, Package):
                if not self._add_pkg_deps(dep,
                    allow_unsatisfied=allow_unsatisfied):
                    return 0
                continue
            if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
                return 0
        # Only process || deps once the plain dep stack is empty.
        if dep_disjunctive_stack:
            if not self._pop_disjunction(allow_unsatisfied):
                return 0
    return 1
def _expand_set_args(self, input_args, add_to_digraph=False):
    """
    Iterate over a list of DependencyArg instances and yield all
    instances given in the input together with additional SetArg
    instances that are generated from nested sets.
    @param input_args: An iterable of DependencyArg instances
    @type input_args: Iterable
    @param add_to_digraph: If True then add SetArg instances
    to the digraph, in order to record parent -> child
    relationships from nested sets
    @type add_to_digraph: Boolean
    @return: All args given in the input together with additional
    SetArg instances that are generated from nested sets
    """
    # NOTE(review): this copy of the generator appears to be missing
    # lines (the "yield"/"continue" statements, the arg_stack
    # initialization and its "while" loop header, and the guards around
    # the digraph.add calls). Restore from upstream before relying on it.

    # Guards against processing the same set twice when sets nest.
    traversed_set_args = set()

    for arg in input_args:
        # Non-set arguments pass straight through (body truncated here).
        if not isinstance(arg, SetArg):

        root_config = arg.root_config
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        # Depth-first traversal of nested sets (loop header truncated).
        arg = arg_stack.pop()
        if arg in traversed_set_args:
        traversed_set_args.add(arg)

        # Record the set as a root node of the dependency graph
        # (presumably guarded by add_to_digraph -- guard truncated).
        self._dynamic_config.digraph.add(arg, None,
            priority=BlockerDepPriority.instance)

        # Traverse nested sets and add them to the stack
        # if they're not already in the graph. Also, graph
        # edges between parent and nested sets.
        for token in arg.pset.getNonAtoms():
            if not token.startswith(SETPREFIX):
            s = token[len(SETPREFIX):]
            # Prefer a set already registered for this depgraph run,
            # falling back to the root_config's sets.
            nested_set = depgraph_sets.sets.get(s)
            if nested_set is None:
                nested_set = root_config.sets.get(s)
            if nested_set is not None:
                nested_arg = SetArg(arg=token, pset=nested_set,
                    root_config=root_config)
                arg_stack.append(nested_arg)
                # Edge from parent set to nested set (presumably
                # guarded by add_to_digraph -- guard truncated).
                self._dynamic_config.digraph.add(nested_arg, arg,
                    priority=BlockerDepPriority.instance)
                depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Add a single Dependency to the graph: register a blocker, or select
    and add a package that satisfies dep.atom, backtracking when the
    dependency cannot be satisfied.
    """
    # NOTE(review): this copy of the method appears to be missing many
    # lines (dangling conditionals, orphaned writemsg argument
    # fragments, undefined locals such as "msg", and absent return
    # statements). Restore from upstream before relying on it.
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts

    # Blocker registration (the dep.blocker guard appears truncated).
    if not buildpkgonly and \
        not dep.collapsed_priority.ignored and \
        not dep.collapsed_priority.optional and \
        dep.parent not in self._dynamic_config._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.

        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            priority=dep.priority, root=dep.parent.root)
        self._dynamic_config._blocker_parents.add(blocker, dep.parent)

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)

    # The caller has selected a specific package
    # via self._minimize_packages().
    existing_node = self._dynamic_config._slot_pkg_map[
        dep.root].get(dep_pkg.slot_atom)

    # Unsatisfied-dependency handling (its guard appears truncated).
    if (dep.collapsed_priority.optional or
        dep.collapsed_priority.ignored):
        # This is an unnecessary build-time dep.
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)

    self._dynamic_config._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))

    # The parent node should not already be in
    # runtime_pkg_mask, since that would trigger an
    # infinite backtracking loop.
    if self._dynamic_config._allow_backtracking:
        if dep.parent in self._dynamic_config._runtime_pkg_mask:
            # Fragment of a warning writemsg call (head truncated).
            "!!! backtracking loop detected: %s %s\n" % \
            self._dynamic_config._runtime_pkg_mask[
                dep.parent]), noiselevel=-1)
        elif not self.need_restart():
            # Do not backtrack if only USE have to be changed in
            # order to satisfy the dependency.
            dep_pkg, existing_node = \
                self._select_package(dep.root, dep.atom.without_use,
                    onlydeps=dep.onlydeps)

            self._dynamic_config._backtrack_infos["missing dependency"] = dep
            self._dynamic_config._need_restart = True

            # Debug report ("if debug:" / msg init truncated).
            msg.append("backtracking due to unsatisfied dep:")
            msg.append(" parent: %s" % dep.parent)
            msg.append(" priority: %s" % dep.priority)
            msg.append(" root: %s" % dep.root)
            msg.append(" atom: %s" % dep.atom)

            writemsg_level("".join("%s\n" % l for l in msg),
                noiselevel=-1, level=logging.DEBUG)

    # Track rebuilds requested via --rebuild-if-* options.
    self._rebuild.add(dep_pkg, dep)

    ignore = dep.collapsed_priority.ignored and \
        not self._dynamic_config._traverse_ignored_deps
    if not ignore and not self._add_pkg(dep_pkg, dep):
def _check_slot_conflict(self, pkg, atom):
    """
    Check whether pkg collides with a package already selected for the
    same slot.

    @return: tuple (existing_node, matches) where existing_node is the
        package already occupying pkg's slot (or None) and matches is
        True when the existing node also satisfies atom (None when
        there is no existing node)
    """
    # NOTE(review): the "matches = None" initialization and the
    # "if existing_node:" guard were missing from the reviewed copy;
    # restored here.
    existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
    matches = None
    if existing_node:
        matches = pkg.cpv == existing_node.cpv
        if pkg != existing_node and \
            atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            matches = bool(InternalPackageSet(initial_atoms=(atom,),
                allow_repo=True).findAtomForPackage(existing_node,
                modified_use=self._pkg_use_enabled(existing_node)))

    return (existing_node, matches)
def _add_pkg(self, pkg, dep):
    """
    Adds a package to the depgraph, queues dependencies, and handles
    slot conflicts.
    """
    # NOTE(review): this copy of the method appears to be missing many
    # lines (orphaned writemsg_level argument fragments, try/except
    # halves, dangling conditionals, undefined locals such as "depth").
    # Restore from upstream before relying on it.
    debug = "--debug" in self._frozen_config.myopts
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()

    # Debug display of the child package (call head truncated).
    "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
        pkg_use_display(pkg, self._frozen_config.myopts,
        modified_use=self._pkg_use_enabled(pkg))),
        level=logging.DEBUG, noiselevel=-1)

    if isinstance(myparent,
        (PackageArg, AtomArg)):
        # For PackageArg and AtomArg types, it's
        # redundant to display the atom attribute.
        "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
        level=logging.DEBUG, noiselevel=-1)

    # Display the specific atom from SetArg or
    if dep.atom is not dep.atom.unevaluated_atom:
        uneval = " (%s)" % (dep.atom.unevaluated_atom,)

    "%s%s%s required by %s\n" %
        ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
        level=logging.DEBUG, noiselevel=-1)

    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]

    # Collect command-line argument atoms matching this package
    # (the enclosing "try:" is truncated here).
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString as e:
    if not pkg.installed:
        # should have been masked before it was selected

    # NOTE: REQUIRED_USE checks are delayed until after
    # package selection, since we want to prompt the user
    # for USE adjustment rather than have REQUIRED_USE
    # affect package selection and || dep choices.
    if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
        eapi_has_required_use(pkg.metadata["EAPI"]):
        required_use_is_sat = check_required_use(
            pkg.metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag,
            eapi=pkg.metadata["EAPI"])
        if not required_use_is_sat:
            # Record all parent relationships for later display.
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))

            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)

            # Queue an unsatisfied-REQUIRED_USE display entry.
            atom = Atom("=" + pkg.cpv)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                {"myparent" : dep.parent, "show_req_use" : pkg}))
            self._dynamic_config._skip_restart = True

    if not pkg.onlydeps:

        existing_node, existing_node_matches = \
            self._check_slot_conflict(pkg, dep.atom)
        slot_collision = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)

            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self._dynamic_config.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))

            # Record the collision (enclosing else-branch truncated).
            self._add_slot_conflict(pkg)
            # Debug display of the conflict (call head truncated).
            "%s%s %s\n" % ("Slot Conflict:".ljust(15),
                existing_node, pkg_use_display(existing_node,
                self._frozen_config.myopts,
                modified_use=self._pkg_use_enabled(existing_node))),
                level=logging.DEBUG, noiselevel=-1)

            slot_collision = True

        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self._dynamic_config.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.

        elif not previously_added:
            # First time this slot is occupied: register the package
            # with the per-root maps and invalidate cached match data.
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)

        except portage.exception.InvalidDependString:
            if not pkg.installed:
                # should have been masked before it was selected

    # Track packages pulled in by sets (guard appears truncated).
    self._dynamic_config._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self._dynamic_config.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)

    # This section determines whether we go deeper into dependencies or not.
    # We want to go deeper on a few occasions:
    # Installing package A, we need to make sure package A's deps are met.
    # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    if arg_atoms and depth > 0:
        for parent, atom in arg_atoms:
            if parent.reset_depth:

    if previously_added and pkg.depth is not None:
        depth = min(pkg.depth, depth)

    deep = self._dynamic_config.myparams.get("deep", 0)
    update = "--update" in self._frozen_config.myopts

    dep.want_update = (not self._dynamic_config._complete_mode and
        (arg_atoms or update) and
        not (deep is not True and depth > deep))

    # Built packages with a slot-operator dep need tracking for
    # possible rebuilds later.
    if (not pkg.onlydeps and pkg.built and
        dep.atom and dep.atom.slot_operator_built):
        self._add_slot_operator_dep(dep)

    recurse = deep is True or depth + 1 <= deep
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
    elif pkg.installed and not recurse:
        dep_stack = self._dynamic_config._ignored_deps

    self._spinner_update()

    if not previously_added:
        dep_stack.append(pkg)
def _check_masks(self, pkg):
    """
    Record a missed, license-masked upgrade for later reporting: if a
    higher version in pkg's slot is masked only due to a LICENSE change,
    remember it in _masked_license_updates.
    """
    # Highest version in this slot that is masked solely by its LICENSE.
    masked_update = self._frozen_config._highest_license_masked.get(
        (pkg.root, pkg.slot_atom))
    if masked_update is None:
        return
    if pkg < masked_update:
        self._dynamic_config._masked_license_updates.add(masked_update)
def _add_parent_atom(self, pkg, parent_atom):
    """Record that parent_atom (a (parent, atom) pair) pulled in pkg."""
    # setdefault lazily creates the per-package set on first use.
    self._dynamic_config._parent_atoms.setdefault(pkg, set()).add(parent_atom)
def _add_slot_operator_dep(self, dep):
    """
    Track a slot-operator (:=) dependency, keyed by the child's
    (root, slot_atom), so rebuilds can be scheduled later.
    """
    # NOTE(review): "slot_info = []" was missing from the reviewed copy;
    # restored here since the new list is stored and appended to below.
    slot_key = (dep.root, dep.child.slot_atom)
    slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
    if slot_info is None:
        slot_info = []
        self._dynamic_config._slot_operator_deps[slot_key] = slot_info
    slot_info.append(dep)
def _add_slot_conflict(self, pkg):
    """
    Record that pkg collides with the package already selected for its
    slot, grouping all colliding packages under (slot_atom, root).
    """
    # NOTE(review): "slot_nodes = set()" and the trailing
    # "slot_nodes.add(pkg)" were missing from the reviewed copy;
    # restored here.
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # First collision for this slot: seed the group with the
        # package currently occupying the slot.
        slot_nodes = set()
        slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
    slot_nodes.add(pkg)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Queue pkg's DEPEND/RDEPEND/PDEPEND strings for graph traversal,
    applying build-time-dep policy (--with-bdeps, --root-deps, removal
    actions) before handing each string to _add_pkg_dep_string.
    """
    # NOTE(review): this copy of the method appears to be missing lines
    # (the "edepend" dict and "deps" tuple initializers, the "myroot"
    # assignment, try/except halves, and debug guards). Restore from
    # upstream before relying on it.
    metadata = pkg.metadata
    removal_action = "remove" in self._dynamic_config.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # Copy each dep string out of the metadata (loop header truncated).
    edepend[k] = metadata[k]

    # --buildpkgonly without "deep" only needs build-time deps.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from

        # (presumably the else-branch of the bdeps check -- truncated)
        ignore_build_time_deps = True

    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        ignore_build_time_deps = True

    # Build-time deps resolve against the running root in prefix/cross
    # setups, otherwise against myroot (guard truncated here).
    depend_root = myroot

    depend_root = self._frozen_config._running_root.root
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            ignore_build_time_deps = True

    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if ignore_build_time_deps and \
        not self._rebuild.rebuild:
        edepend["DEPEND"] = ""

    # (root, dep string, priority) triples -- the "deps = (" tuple
    # header is truncated here.
    (depend_root, edepend["DEPEND"],
        self._priority(buildtime=True,
        optional=(pkg.built or ignore_build_time_deps),
        ignored=ignore_build_time_deps)),
    (myroot, edepend["RDEPEND"],
        self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True))

    debug = "--debug" in self._frozen_config.myopts

    for dep_root, dep_string, dep_priority in deps:
        # Debug output (the "if debug:" guard appears truncated).
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # Expand USE conditionals (the enclosing "try:" is truncated).
        dep_string = portage.dep.use_reduce(dep_string,
            uselist=self._pkg_use_enabled(pkg),
            is_valid_flag=pkg.iuse.is_valid_flag,
            opconvert=True, token_class=Atom,
            eapi=pkg.metadata['EAPI'])
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected

            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg),
                opconvert=True, token_class=Atom,
                eapi=pkg.metadata['EAPI'])
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

        # Split out || groups for deferred processing (try: truncated).
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)

        # should have been masked before it was selected

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,

    self._dynamic_config._traversed_pkg_deps.add(pkg)
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied):
    """
    Wrapper around _wrapped_add_pkg_dep_string that temporarily disables
    autounmask for optional/ignored dependencies, restoring the previous
    autounmask state afterwards.
    """
    # NOTE(review): the signature continuation and the try/finally
    # skeleton were missing from the reviewed copy; restored here so the
    # autounmask state is always put back.
    _autounmask_backup = self._dynamic_config._autounmask
    if dep_priority.optional or dep_priority.ignored:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    try:
        return self._wrapped_add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied)
    finally:
        self._dynamic_config._autounmask = _autounmask_backup
1816 def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
1817 dep_string, allow_unsatisfied):
1818 depth = pkg.depth + 1
1819 deep = self._dynamic_config.myparams.get("deep", 0)
1820 recurse_satisfied = deep is True or depth <= deep
1821 debug = "--debug" in self._frozen_config.myopts
1822 strict = pkg.type_name != "installed"
1825 writemsg_level("\nParent: %s\n" % (pkg,),
1826 noiselevel=-1, level=logging.DEBUG)
1827 dep_repr = portage.dep.paren_enclose(dep_string,
1828 unevaluated_atom=True, opconvert=True)
1829 writemsg_level("Depstring: %s\n" % (dep_repr,),
1830 noiselevel=-1, level=logging.DEBUG)
1831 writemsg_level("Priority: %s\n" % (dep_priority,),
1832 noiselevel=-1, level=logging.DEBUG)
1835 selected_atoms = self._select_atoms(dep_root,
1836 dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
1837 strict=strict, priority=dep_priority)
1838 except portage.exception.InvalidDependString:
1840 self._dynamic_config._masked_installed.add(pkg)
1843 # should have been masked before it was selected
1847 writemsg_level("Candidates: %s\n" % \
1848 ([str(x) for x in selected_atoms[pkg]],),
1849 noiselevel=-1, level=logging.DEBUG)
1851 root_config = self._frozen_config.roots[dep_root]
1852 vardb = root_config.trees["vartree"].dbapi
1853 traversed_virt_pkgs = set()
1855 reinstall_atoms = self._frozen_config.reinstall_atoms
1856 for atom, child in self._minimize_children(
1857 pkg, dep_priority, root_config, selected_atoms[pkg]):
1859 # If this was a specially generated virtual atom
1860 # from dep_check, map it back to the original, in
1861 # order to avoid distortion in places like display
1862 # or conflict resolution code.
1863 is_virt = hasattr(atom, '_orig_atom')
1864 atom = getattr(atom, '_orig_atom', atom)
1866 if atom.blocker and \
1867 (dep_priority.optional or dep_priority.ignored):
1868 # For --with-bdeps, ignore build-time only blockers
1869 # that originate from built packages.
1872 mypriority = dep_priority.copy()
1873 if not atom.blocker:
1874 inst_pkgs = [inst_pkg for inst_pkg in
1875 reversed(vardb.match_pkgs(atom))
1876 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1877 modified_use=self._pkg_use_enabled(inst_pkg))]
1879 for inst_pkg in inst_pkgs:
1880 if self._pkg_visibility_check(inst_pkg):
1882 mypriority.satisfied = inst_pkg
1884 if not mypriority.satisfied:
1885 # none visible, so use highest
1886 mypriority.satisfied = inst_pkgs[0]
1888 dep = Dependency(atom=atom,
1889 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1890 priority=mypriority, root=dep_root)
1892 # In some cases, dep_check will return deps that shouldn't
1893 # be proccessed any further, so they are identified and
1894 # discarded here. Try to discard as few as possible since
1895 # discarded dependencies reduce the amount of information
1896 # available for optimization of merge order.
1898 if not atom.blocker and \
1899 not recurse_satisfied and \
1900 mypriority.satisfied and \
1901 mypriority.satisfied.visible and \
1902 dep.child is not None and \
1903 not dep.child.installed and \
1904 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1905 dep.child.slot_atom) is None:
1908 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
1909 except InvalidDependString:
1910 if not dep.child.installed:
1914 # Existing child selection may not be valid unless
1915 # it's added to the graph immediately, since "complete"
1916 # mode may select a different child later.
1919 self._dynamic_config._ignored_deps.append(dep)
1922 if dep_priority.ignored and \
1923 not self._dynamic_config._traverse_ignored_deps:
1924 if is_virt and dep.child is not None:
1925 traversed_virt_pkgs.add(dep.child)
1927 self._dynamic_config._ignored_deps.append(dep)
1929 if not self._add_dep(dep,
1930 allow_unsatisfied=allow_unsatisfied):
1932 if is_virt and dep.child is not None:
1933 traversed_virt_pkgs.add(dep.child)
1935 selected_atoms.pop(pkg)
1937 # Add selected indirect virtual deps to the graph. This
1938 # takes advantage of circular dependency avoidance that's done
1939 # by dep_zapdeps. We preserve actual parent/child relationships
1940 # here in order to avoid distorting the dependency graph like
1941 # <=portage-2.1.6.x did.
1942 for virt_dep, atoms in selected_atoms.items():
1944 virt_pkg = virt_dep.child
1945 if virt_pkg not in traversed_virt_pkgs:
1949 writemsg_level("\nCandidates: %s: %s\n" % \
1950 (virt_pkg.cpv, [str(x) for x in atoms]),
1951 noiselevel=-1, level=logging.DEBUG)
1953 if not dep_priority.ignored or \
1954 self._dynamic_config._traverse_ignored_deps:
1956 inst_pkgs = [inst_pkg for inst_pkg in
1957 reversed(vardb.match_pkgs(virt_dep.atom))
1958 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1959 modified_use=self._pkg_use_enabled(inst_pkg))]
1961 for inst_pkg in inst_pkgs:
1962 if self._pkg_visibility_check(inst_pkg):
1964 virt_dep.priority.satisfied = inst_pkg
1966 if not virt_dep.priority.satisfied:
1967 # none visible, so use highest
1968 virt_dep.priority.satisfied = inst_pkgs[0]
1970 if not self._add_pkg(virt_pkg, virt_dep):
1973 for atom, child in self._minimize_children(
1974 pkg, self._priority(runtime=True), root_config, atoms):
1976 # If this was a specially generated virtual atom
1977 # from dep_check, map it back to the original, in
1978 # order to avoid distortion in places like display
1979 # or conflict resolution code.
1980 is_virt = hasattr(atom, '_orig_atom')
1981 atom = getattr(atom, '_orig_atom', atom)
1983 # This is a GLEP 37 virtual, so its deps are all runtime.
1984 mypriority = self._priority(runtime=True)
1985 if not atom.blocker:
1986 inst_pkgs = [inst_pkg for inst_pkg in
1987 reversed(vardb.match_pkgs(atom))
1988 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1989 modified_use=self._pkg_use_enabled(inst_pkg))]
1991 for inst_pkg in inst_pkgs:
1992 if self._pkg_visibility_check(inst_pkg):
1994 mypriority.satisfied = inst_pkg
1996 if not mypriority.satisfied:
1997 # none visible, so use highest
1998 mypriority.satisfied = inst_pkgs[0]
2000 # Dependencies of virtuals are considered to have the
2001 # same depth as the virtual itself.
2002 dep = Dependency(atom=atom,
2003 blocker=atom.blocker, child=child, depth=virt_dep.depth,
2004 parent=virt_pkg, priority=mypriority, root=dep_root,
2005 collapsed_parent=pkg, collapsed_priority=dep_priority)
2008 if not atom.blocker and \
2009 not recurse_satisfied and \
2010 mypriority.satisfied and \
2011 mypriority.satisfied.visible and \
2012 dep.child is not None and \
2013 not dep.child.installed and \
2014 self._dynamic_config._slot_pkg_map[dep.child.root].get(
2015 dep.child.slot_atom) is None:
2018 myarg = next(self._iter_atoms_for_pkg(dep.child), None)
2019 except InvalidDependString:
2020 if not dep.child.installed:
2026 self._dynamic_config._ignored_deps.append(dep)
2029 if dep_priority.ignored and \
2030 not self._dynamic_config._traverse_ignored_deps:
2031 if is_virt and dep.child is not None:
2032 traversed_virt_pkgs.add(dep.child)
2034 self._dynamic_config._ignored_deps.append(dep)
2036 if not self._add_dep(dep,
2037 allow_unsatisfied=allow_unsatisfied):
2039 if is_virt and dep.child is not None:
2040 traversed_virt_pkgs.add(dep.child)
2043 writemsg_level("\nExiting... %s\n" % (pkg,),
2044 noiselevel=-1, level=logging.DEBUG)
# Generator: maps each atom to a selected package, prunes redundant
# selections, and yields (atom, package) pairs in conflict-priority order.
2048 def _minimize_children(self, parent, priority, root_config, atoms):
2050 Selects packages to satisfy the given atoms, and minimizes the
2051 number of selected packages. This serves to identify and eliminate
2052 redundant package selections when multiple atoms happen to specify
# Map each atom to the package chosen for it by the resolver.
2062 dep_pkg, existing_node = self._select_package(
2063 root_config.root, atom)
2067 atom_pkg_map[atom] = dep_pkg
# Fast path: with fewer than two selections there is nothing to minimize.
2069 if len(atom_pkg_map) < 2:
2070 for item in atom_pkg_map.items():
# Build reverse maps: package -> atoms it satisfies, and cp -> packages.
2076 for atom, pkg in atom_pkg_map.items():
2077 pkg_atom_map.setdefault(pkg, set()).add(atom)
2078 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
2080 for pkgs in cp_pkg_map.values():
2083 for atom in pkg_atom_map[pkg]:
2087 # Use a digraph to identify and eliminate any
2088 # redundant package selections.
2089 atom_pkg_graph = digraph()
2092 for atom in pkg_atom_map[pkg1]:
2094 atom_pkg_graph.add(pkg1, atom)
# Also record every other package of the same cp that satisfies this atom,
# so a package becomes removable when each of its atoms has an alternative.
2095 atom_set = InternalPackageSet(initial_atoms=(atom,),
2100 if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
2101 atom_pkg_graph.add(pkg2, atom)
# A package is redundant only if every atom pointing at it has >= 2 children.
2104 eliminate_pkg = True
2105 for atom in atom_pkg_graph.parent_nodes(pkg):
2106 if len(atom_pkg_graph.child_nodes(atom)) < 2:
2107 eliminate_pkg = False
2110 atom_pkg_graph.remove(pkg)
2112 # Yield ~, =*, < and <= atoms first, since those are more likely to
2113 # cause slot conflicts, and we want those atoms to be displayed
2114 # in the resulting slot conflict message (see bug #291142).
2115 # Give similar treatment to slot/sub-slot atoms.
2119 for atom in cp_atoms:
2120 if atom.slot_operator_built:
2121 abi_atoms.append(atom)
# Atoms whose selected child conflicts with an existing graph node are
# grouped separately so they are yielded ahead of normal atoms.
2124 for child_pkg in atom_pkg_graph.child_nodes(atom):
2125 existing_node, matches = \
2126 self._check_slot_conflict(child_pkg, atom)
2127 if existing_node and not matches:
2131 conflict_atoms.append(atom)
2133 normal_atoms.append(atom)
2135 for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
2136 child_pkgs = atom_pkg_graph.child_nodes(atom)
2137 # if more than one child, yield highest version
2138 if len(child_pkgs) > 1:
2140 yield (atom, child_pkgs[-1])
# Walks a parsed dependency structure, deferring disjunctive parts
# (|| groups and virtual/* atoms) to the disjunctive stack while
# yielding the ordinary deps for immediate processing.
2142 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2144 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
2145 Yields non-disjunctive deps. Raises InvalidDependString when
2148 for x in dep_struct:
2149 if isinstance(x, list):
2150 if x and x[0] == "||":
# || groups are deferred; wrapped in a list to keep the structure shape.
2151 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
# Otherwise recurse into the nested group and re-yield its plain deps.
2153 for y in self._queue_disjunctive_deps(
2154 pkg, dep_root, dep_priority, x):
2157 # Note: Eventually this will check for PROPERTIES=virtual
2158 # or whatever other metadata gets implemented for this
2160 if x.cp.startswith('virtual/'):
2161 self._queue_disjunction(pkg, dep_root, dep_priority, [x])
2165 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2166 self._dynamic_config._dep_disjunctive_stack.append(
2167 (pkg, dep_root, dep_priority, dep_struct))
# Processes one previously-deferred disjunctive dependency group.
2169 def _pop_disjunction(self, allow_unsatisfied):
2171 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
2172 populate self._dynamic_config._dep_stack.
2174 pkg, dep_root, dep_priority, dep_struct = \
2175 self._dynamic_config._dep_disjunctive_stack.pop()
# Failure of _add_pkg_dep_string is propagated to the caller
# (the failure-path lines are not visible in this view).
2176 if not self._add_pkg_dep_string(
2177 pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
def _priority(self, **kwargs):
	"""Construct a dependency priority appropriate for the current mode.

	Uses UnmergeDepPriority when the resolver is operating in "remove"
	mode, and plain DepPriority otherwise. All keyword arguments are
	forwarded to the chosen constructor.
	"""
	removing = "remove" in self._dynamic_config.myparams
	priority_cls = UnmergeDepPriority if removing else DepPriority
	return priority_cls(**kwargs)
# Expands a category-less atom into one atom per category that
# actually contains a matching package name in the configured dbs.
2188 def _dep_expand(self, root_config, atom_without_category):
2190 @param root_config: a root config instance
2191 @type root_config: RootConfig
2192 @param atom_without_category: an atom without a category component
2193 @type atom_without_category: String
2195 @return: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category so the package-name part can be split out.
2197 null_cp = portage.dep_getkey(insert_category_into_atom(
2198 atom_without_category, "null"))
2199 cat, atom_pn = portage.catsplit(null_cp)
2201 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
# Probe every db/category combination for the bare package name.
2203 for db, pkg_type, built, installed, db_keys in dbs:
2204 for cat in db.categories:
2205 if db.cp_list("%s/%s" % (cat, atom_pn)):
# Rebuild full atoms for each category that matched.
2209 for cat in categories:
2210 deps.append(Atom(insert_category_into_atom(
2211 atom_without_category, cat), allow_repo=True))
# Returns whether any filtered-tree db for the given root carries a
# package under atom_cp (i.e. a new-style virtual package exists).
2214 def _have_new_virt(self, root, atom_cp):
2216 for db, pkg_type, built, installed, db_keys in \
2217 self._dynamic_config._filtered_trees[root]["dbs"]:
2218 if db.cp_list(atom_cp):
# Yields the argument atoms (from the configured sets) that apply
# to the given package, with filtering for new-style virtuals and
# for higher-slot visible alternatives.
2223 def _iter_atoms_for_pkg(self, pkg):
2224 depgraph_sets = self._dynamic_config.sets[pkg.root]
2225 atom_arg_map = depgraph_sets.atom_arg_map
2226 for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
# Skip old-style virtual matches when a new-style virtual package exists.
2227 if atom.cp != pkg.cp and \
2228 self._have_new_virt(pkg.root, atom.cp):
2231 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
2232 visible_pkgs.reverse() # descending order
# Look for a visible package of the same cp, newer than pkg, in a
# different slot; its presence affects whether this atom is yielded.
2234 for visible_pkg in visible_pkgs:
2235 if visible_pkg.cp != atom.cp:
2237 if pkg >= visible_pkg:
2238 # This is descending order, and we're not
2239 # interested in any versions <= pkg given.
2241 if pkg.slot_atom != visible_pkg.slot_atom:
2242 higher_slot = visible_pkg
2244 if higher_slot is not None:
# PackageArg instances are matched specially against the atom's args.
2246 for arg in atom_arg_map[(atom, pkg.root)]:
2247 if isinstance(arg, PackageArg) and \
# Entry point that turns command-line arguments (.tbz2 files, .ebuild
# files, file paths, sets, and atoms) into dependency arguments, then
# delegates to self._resolve.
2252 def select_files(self, myfiles):
2253 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
2254 self._dynamic_config._initial_arg_list and call self._resolve to create the
2255 appropriate depgraph and return a favorite list."""
2257 debug = "--debug" in self._frozen_config.myopts
2258 root_config = self._frozen_config.roots[self._frozen_config.target_root]
2259 sets = root_config.sets
2260 depgraph_sets = self._dynamic_config.sets[root_config.root]
2262 eroot = root_config.root
2263 root = root_config.settings['ROOT']
# Database handles for installed packages (vardb), the ebuild
# repository (portdb) and binary packages (bindb) for this root.
2264 vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
2265 real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
2266 portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
2267 bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
2268 pkgsettings = self._frozen_config.pkgsettings[eroot]
2270 onlydeps = "--onlydeps" in self._frozen_config.myopts
# Dispatch each argument by file extension / path shape / atom syntax.
2273 ext = os.path.splitext(x)[1]
# Binary package argument: locate the tbz2 under PKGDIR if the
# given path does not exist as-is.
2275 if not os.path.exists(x):
2277 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2278 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2279 elif os.path.exists(
2280 os.path.join(pkgsettings["PKGDIR"], x)):
2281 x = os.path.join(pkgsettings["PKGDIR"], x)
2283 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
2284 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
2285 return 0, myfavorites
# Derive the cpv key from the package's embedded CATEGORY metadata.
2286 mytbz2=portage.xpak.tbz2(x)
2287 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The tbz2 must be the one the binary tree would resolve for this key.
2288 if os.path.realpath(x) != \
2289 os.path.realpath(bindb.bintree.getname(mykey)):
2290 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
2291 self._dynamic_config._skip_restart = True
2292 return 0, myfavorites
2294 pkg = self._pkg(mykey, "binary", root_config,
2296 args.append(PackageArg(arg=x, package=pkg,
2297 root_config=root_config))
# Ebuild file argument: validate that the path lies inside a proper
# category/package hierarchy of a configured repository.
2298 elif ext==".ebuild":
2299 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2300 pkgdir = os.path.dirname(ebuild_path)
2301 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2302 cp = pkgdir[len(tree_root)+1:]
2303 e = portage.exception.PackageNotFound(
2304 ("%s is not in a valid portage tree " + \
2305 "hierarchy or does not exist") % x)
2306 if not portage.isvalidatom(cp):
2308 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to form the cpv key.
2309 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2310 if not portage.isvalidatom("="+mykey):
2312 ebuild_path = portdb.findname(mykey)
2314 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2315 cp, os.path.basename(ebuild_path)):
2316 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
2317 self._dynamic_config._skip_restart = True
2318 return 0, myfavorites
# Warn (with countdown) when the requested ebuild is masked.
2319 if mykey not in portdb.xmatch(
2320 "match-visible", portage.cpv_getkey(mykey)):
2321 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
2322 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
2323 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
2324 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
2327 raise portage.exception.PackageNotFound(
2328 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2329 pkg = self._pkg(mykey, "ebuild", root_config,
2330 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
2331 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
2332 args.append(PackageArg(arg=x, package=pkg,
2333 root_config=root_config))
# Absolute path argument: resolve later to the owning package(s).
2334 elif x.startswith(os.path.sep):
2335 if not x.startswith(eroot):
2336 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2337 " $EROOT.\n") % x, noiselevel=-1)
2338 self._dynamic_config._skip_restart = True
2340 # Queue these up since it's most efficient to handle
2341 # multiple files in a single iter_owners() call.
2342 lookup_owners.append(x)
# Relative path argument: normalize to absolute before owner lookup.
2343 elif x.startswith("." + os.sep) or \
2344 x.startswith(".." + os.sep):
2345 f = os.path.abspath(x)
2346 if not f.startswith(eroot):
2347 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
2348 " $EROOT.\n") % (f, x), noiselevel=-1)
2349 self._dynamic_config._skip_restart = True
2351 lookup_owners.append(f)
2353 if x in ("system", "world"):
# Named set argument (e.g. @world): resolve and register the set.
2355 if x.startswith(SETPREFIX):
2356 s = x[len(SETPREFIX):]
2358 raise portage.exception.PackageSetNotFound(s)
2359 if s in depgraph_sets.sets:
2362 depgraph_sets.sets[s] = pset
2363 args.append(SetArg(arg=x, pset=pset,
2364 root_config=root_config))
# Plain atom argument: validate syntax first.
2366 if not is_valid_package_atom(x, allow_repo=True):
2367 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2369 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2370 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2371 self._dynamic_config._skip_restart = True
2373 # Don't expand categories or old-style virtuals here unless
2374 # necessary. Expansion of old-style virtuals here causes at
2375 # least the following problems:
2376 # 1) It's more difficult to determine which set(s) an atom
2377 # came from, if any.
2378 # 2) It takes away freedom from the resolver to choose other
2379 # possible expansions when necessary.
2381 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2382 root_config=root_config))
# Category-less atom: expand to candidate categories and try to
# disambiguate using what is actually installed.
2384 expanded_atoms = self._dep_expand(root_config, x)
2385 installed_cp_set = set()
2386 for atom in expanded_atoms:
2387 if vardb.cp_list(atom.cp):
2388 installed_cp_set.add(atom.cp)
# Prefer the single non-virtual cp when several cps are installed.
2390 if len(installed_cp_set) > 1:
2391 non_virtual_cps = set()
2392 for atom_cp in installed_cp_set:
2393 if not atom_cp.startswith("virtual/"):
2394 non_virtual_cps.add(atom_cp)
2395 if len(non_virtual_cps) == 1:
2396 installed_cp_set = non_virtual_cps
2398 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2399 installed_cp = next(iter(installed_cp_set))
2400 for atom in expanded_atoms:
2401 if atom.cp == installed_cp:
2403 for pkg in self._iter_match_pkgs_any(
2404 root_config, atom.without_use,
2406 if not pkg.installed:
2410 expanded_atoms = [atom]
2413 # If a non-virtual package and one or more virtual packages
2414 # are in expanded_atoms, use the non-virtual package.
2415 if len(expanded_atoms) > 1:
2416 number_of_virtuals = 0
2417 for expanded_atom in expanded_atoms:
2418 if expanded_atom.cp.startswith("virtual/"):
2419 number_of_virtuals += 1
2421 candidate = expanded_atom
2422 if len(expanded_atoms) - number_of_virtuals == 1:
2423 expanded_atoms = [ candidate ]
# Still ambiguous: report the candidates and bail out.
2425 if len(expanded_atoms) > 1:
2426 writemsg("\n\n", noiselevel=-1)
2427 ambiguous_package_name(x, expanded_atoms, root_config,
2428 self._frozen_config.spinner, self._frozen_config.myopts)
2429 self._dynamic_config._skip_restart = True
2430 return False, myfavorites
2432 atom = expanded_atoms[0]
# No expansion matched; if old-style virtuals (virts_p) provide the
# name, let the depgraph choose a virtual/ expansion.
2434 null_atom = Atom(insert_category_into_atom(x, "null"),
2436 cat, atom_pn = portage.catsplit(null_atom.cp)
2437 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2439 # Allow the depgraph to choose which virtual.
2440 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2445 if atom.use and atom.use.conditional:
2447 ("\n\n!!! '%s' contains a conditional " + \
2448 "which is not allowed.\n") % (x,), noiselevel=-1)
2449 writemsg("!!! Please check ebuild(5) for full details.\n")
2450 self._dynamic_config._skip_restart = True
2453 args.append(AtomArg(arg=x, atom=atom,
2454 root_config=root_config))
# Resolve queued file paths to owning packages in one pass.
2458 search_for_multiple = False
2459 if len(lookup_owners) > 1:
2460 search_for_multiple = True
2462 for x in lookup_owners:
2463 if not search_for_multiple and os.path.isdir(x):
2464 search_for_multiple = True
# Convert to a path relative to ROOT for the owners index.
2465 relative_paths.append(x[len(root)-1:])
2468 for pkg, relative_path in \
2469 real_vardb._owners.iter_owners(relative_paths):
2470 owners.add(pkg.mycpv)
2471 if not search_for_multiple:
2475 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2476 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2477 self._dynamic_config._skip_restart = True
# Build slot-qualified atoms for each owning installed package.
2481 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2483 # portage now masks packages with missing slot, but it's
2484 # possible that one was installed by an older version
2485 atom = Atom(portage.cpv_getkey(cpv))
2487 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2488 args.append(AtomArg(arg=atom, atom=atom,
2489 root_config=root_config))
2491 if "--update" in self._frozen_config.myopts:
2492 # In some cases, the greedy slots behavior can pull in a slot that
2493 # the user would want to uninstall due to it being blocked by a
2494 # newer version in a different slot. Therefore, it's necessary to
2495 # detect and discard any that should be uninstalled. Each time
2496 # that arguments are updated, package selections are repeated in
2497 # order to ensure consistency with the current arguments:
2499 # 1) Initialize args
2500 # 2) Select packages and generate initial greedy atoms
2501 # 3) Update args with greedy atoms
2502 # 4) Select packages and generate greedy atoms again, while
2503 # accounting for any blockers between selected packages
2504 # 5) Update args with revised greedy atoms
2506 self._set_args(args)
2509 greedy_args.append(arg)
2510 if not isinstance(arg, AtomArg):
2512 for atom in self._greedy_slots(arg.root_config, arg.atom):
2514 AtomArg(arg=arg.arg, atom=atom,
2515 root_config=arg.root_config))
2517 self._set_args(greedy_args)
2520 # Revise greedy atoms, accounting for any blockers
2521 # between selected packages.
2522 revised_greedy_args = []
2524 revised_greedy_args.append(arg)
2525 if not isinstance(arg, AtomArg):
2527 for atom in self._greedy_slots(arg.root_config, arg.atom,
2528 blocker_lookahead=True):
2529 revised_greedy_args.append(
2530 AtomArg(arg=arg.arg, atom=atom,
2531 root_config=arg.root_config))
2532 args = revised_greedy_args
2533 del revised_greedy_args
2535 args.extend(self._gen_reinstall_sets())
2536 self._set_args(args)
# Record favorites (atoms and non-internal set names) for the world file.
2538 myfavorites = set(myfavorites)
2540 if isinstance(arg, (AtomArg, PackageArg)):
2541 myfavorites.add(arg.atom)
2542 elif isinstance(arg, SetArg):
2543 if not arg.internal:
2544 myfavorites.add(arg.arg)
2545 myfavorites = list(myfavorites)
2548 portage.writemsg("\n", noiselevel=-1)
2549 # Order needs to be preserved since a feature of --nodeps
2550 # is to allow the user to force a specific merge order.
2551 self._dynamic_config._initial_arg_list = args[:]
2553 return self._resolve(myfavorites)
# Generator of internal SetArg instances for packages that must be
# rebuilt or reinstalled (rebuild triggers and slot-operator
# replacements), grouped by (root, set name).
2555 def _gen_reinstall_sets(self):
2558 for root, atom in self._rebuild.rebuild_list:
2559 atom_list.append((root, '__auto_rebuild__', atom))
2560 for root, atom in self._rebuild.reinstall_list:
2561 atom_list.append((root, '__auto_reinstall__', atom))
2562 for root, atom in self._dynamic_config._slot_operator_replace_installed:
2563 atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
# Group collected atoms by (root, set_name) and emit one SetArg each.
2566 for root, set_name, atom in atom_list:
2567 set_dict.setdefault((root, set_name), []).append(atom)
2569 for (root, set_name), atoms in set_dict.items():
2570 yield SetArg(arg=(SETPREFIX + set_name),
2571 # Set reset_depth=False here, since we don't want these
2572 # special sets to interact with depth calculations (see
2573 # the emerge --deep=DEPTH option), though we want them
2574 # to behave like normal arguments in most other respects.
2575 pset=InternalPackageSet(initial_atoms=atoms),
2576 force_reinstall=True,
2579 root_config=self._frozen_config.roots[root])
# Adds the root argument packages to the graph, then builds the full
# dependency graph and classifies the overall result (success,
# restart-needed, or autounmask-only success).
2581 def _resolve(self, myfavorites):
2582 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2583 call self._creategraph to process theier deps and return
2585 debug = "--debug" in self._frozen_config.myopts
2586 onlydeps = "--onlydeps" in self._frozen_config.myopts
2587 myroot = self._frozen_config.target_root
2588 pkgsettings = self._frozen_config.pkgsettings[myroot]
2589 pprovideddict = pkgsettings.pprovideddict
2590 virtuals = pkgsettings.getvirtuals()
2591 args = self._dynamic_config._initial_arg_list[:]
# Process every atom from every (expanded) argument set.
2593 for arg in self._expand_set_args(args, add_to_digraph=True):
2594 for atom in arg.pset.getAtoms():
2595 self._spinner_update()
2596 dep = Dependency(atom=atom, onlydeps=onlydeps,
2597 root=myroot, parent=arg)
# package.provided entries satisfy the atom without a graph node.
2599 pprovided = pprovideddict.get(atom.cp)
2600 if pprovided and portage.match_from_list(atom, pprovided):
2601 # A provided package has been specified on the command line.
2602 self._dynamic_config._pprovided_args.append((arg, atom))
2604 if isinstance(arg, PackageArg):
2605 if not self._add_pkg(arg.package, dep) or \
2606 not self._create_graph():
2607 if not self.need_restart():
2608 sys.stderr.write(("\n\n!!! Problem " + \
2609 "resolving dependencies for %s\n") % \
2611 return 0, myfavorites
2614 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2615 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2616 pkg, existing_node = self._select_package(
2617 myroot, atom, onlydeps=onlydeps)
# No package selected: check old-style virtual expansions against
# package.provided before reporting the atom as unsatisfied.
2619 pprovided_match = False
2620 for virt_choice in virtuals.get(atom.cp, []):
2621 expanded_atom = portage.dep.Atom(
2622 atom.replace(atom.cp, virt_choice.cp, 1))
2623 pprovided = pprovideddict.get(expanded_atom.cp)
2625 portage.match_from_list(expanded_atom, pprovided):
2626 # A provided package has been
2627 # specified on the command line.
2628 self._dynamic_config._pprovided_args.append((arg, atom))
2629 pprovided_match = True
# Unsatisfied atoms from user sets are fatal; system/world
# set members are recorded as missing instead.
2634 if not (isinstance(arg, SetArg) and \
2635 arg.name in ("selected", "system", "world")):
2636 self._dynamic_config._unsatisfied_deps_for_display.append(
2637 ((myroot, atom), {"myparent" : arg}))
2638 return 0, myfavorites
2640 self._dynamic_config._missing_args.append((arg, atom))
2642 if atom.cp != pkg.cp:
2643 # For old-style virtuals, we need to repeat the
2644 # package.provided check against the selected package.
2645 expanded_atom = atom.replace(atom.cp, pkg.cp)
2646 pprovided = pprovideddict.get(pkg.cp)
2648 portage.match_from_list(expanded_atom, pprovided):
2649 # A provided package has been
2650 # specified on the command line.
2651 self._dynamic_config._pprovided_args.append((arg, atom))
2653 if pkg.installed and \
2654 "selective" not in self._dynamic_config.myparams and \
2655 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2656 pkg, modified_use=self._pkg_use_enabled(pkg)):
2657 self._dynamic_config._unsatisfied_deps_for_display.append(
2658 ((myroot, atom), {"myparent" : arg}))
2659 # Previous behavior was to bail out in this case, but
2660 # since the dep is satisfied by the installed package,
2661 # it's more friendly to continue building the graph
2662 # and just show a warning message. Therefore, only bail
2663 # out here if the atom is not from either the system or
2665 if not (isinstance(arg, SetArg) and \
2666 arg.name in ("selected", "system", "world")):
2667 return 0, myfavorites
2669 # Add the selected package to the graph as soon as possible
2670 # so that later dep_check() calls can use it as feedback
2671 # for making more consistent atom selections.
2672 if not self._add_pkg(pkg, dep):
2673 if self.need_restart():
2675 elif isinstance(arg, SetArg):
2676 writemsg(("\n\n!!! Problem resolving " + \
2677 "dependencies for %s from %s\n") % \
2678 (atom, arg.arg), noiselevel=-1)
2680 writemsg(("\n\n!!! Problem resolving " + \
2681 "dependencies for %s\n") % \
2682 (atom,), noiselevel=-1)
2683 return 0, myfavorites
2685 except SystemExit as e:
2686 raise # Needed else can't exit
2687 except Exception as e:
2688 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2689 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2692 # Now that the root packages have been added to the graph,
2693 # process the dependencies.
2694 if not self._create_graph():
2695 return 0, myfavorites
2699 except self._unknown_internal_error:
2700 return False, myfavorites
# Slot collisions (without --accept-blockers semantics) and
# backtrackable slot conflicts both force a False result.
2702 if (self._dynamic_config._slot_collision_info and
2703 not self._accept_blocker_conflicts()) or \
2704 (self._dynamic_config._allow_backtracking and
2705 "slot conflict" in self._dynamic_config._backtrack_infos):
2706 return False, myfavorites
2708 if self._rebuild.trigger_rebuilds():
2709 backtrack_infos = self._dynamic_config._backtrack_infos
2710 config = backtrack_infos.setdefault("config", {})
2711 config["rebuild_list"] = self._rebuild.rebuild_list
2712 config["reinstall_list"] = self._rebuild.reinstall_list
2713 self._dynamic_config._need_restart = True
2714 return False, myfavorites
2716 if "config" in self._dynamic_config._backtrack_infos and \
2717 ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
2718 "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
2719 self.need_restart():
2720 return False, myfavorites
2722 # Any failures except those due to autounmask *alone* should return
2723 # before this point, since the success_without_autounmask flag that's
2724 # set below is reserved for cases where there are *zero* other
2725 # problems. For reference, see backtrack_depgraph, where it skips the
2726 # get_best_run() call when success_without_autounmask is True.
2728 digraph_nodes = self._dynamic_config.digraph.nodes
2730 if any(x in digraph_nodes for x in
2731 self._dynamic_config._needed_unstable_keywords) or \
2732 any(x in digraph_nodes for x in
2733 self._dynamic_config._needed_p_mask_changes) or \
2734 any(x in digraph_nodes for x in
2735 self._dynamic_config._needed_use_config_changes) or \
2736 any(x in digraph_nodes for x in
2737 self._dynamic_config._needed_license_changes) :
2738 #We failed if the user needs to change the configuration
2739 self._dynamic_config._success_without_autounmask = True
2740 return False, myfavorites
2742 # We're true here unless we are missing binaries.
2743 return (True, myfavorites)
# Rebuilds the per-root argument-atom indexes from the given args and
# invalidates package-selection caches; safe to call repeatedly.
2745 def _set_args(self, args):
2747 Create the "__non_set_args__" package set from atoms and packages given as
2748 arguments. This method can be called multiple times if necessary.
2749 The package selection cache is automatically invalidated, since
2750 arguments influence package selections.
# Reset per-root state: the internal set, atom index, and atom->arg map.
2755 for root in self._dynamic_config.sets:
2756 depgraph_sets = self._dynamic_config.sets[root]
2757 depgraph_sets.sets.setdefault('__non_set_args__',
2758 InternalPackageSet(allow_repo=True)).clear()
2759 depgraph_sets.atoms.clear()
2760 depgraph_sets.atom_arg_map.clear()
2761 set_atoms[root] = []
2762 non_set_atoms[root] = []
2764 # We don't add set args to the digraph here since that
2765 # happens at a later stage and we don't want to make
2766 # any state changes here that aren't reversed by a
2767 # another call to this method.
2768 for arg in self._expand_set_args(args, add_to_digraph=False):
2769 atom_arg_map = self._dynamic_config.sets[
2770 arg.root_config.root].atom_arg_map
# Partition atoms: those from SetArgs vs. everything else.
2771 if isinstance(arg, SetArg):
2772 atom_group = set_atoms[arg.root_config.root]
2774 atom_group = non_set_atoms[arg.root_config.root]
2776 for atom in arg.pset.getAtoms():
2777 atom_group.append(atom)
# Index every (atom, root) back to the args that supplied it.
2778 atom_key = (atom, arg.root_config.root)
2779 refs = atom_arg_map.get(atom_key)
2782 atom_arg_map[atom_key] = refs
# Publish the collected atoms into the per-root depgraph sets.
2786 for root in self._dynamic_config.sets:
2787 depgraph_sets = self._dynamic_config.sets[root]
2788 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2789 non_set_atoms.get(root, [])))
2790 depgraph_sets.sets['__non_set_args__'].update(
2791 non_set_atoms.get(root, []))
2793 # Invalidate the package selection cache, since
2794 # arguments influence package selections.
2795 self._dynamic_config._highest_pkg_cache.clear()
2796 for trees in self._dynamic_config._filtered_trees.values():
2797 trees["porttree"].dbapi._clear_cache()
# Computes extra slot atoms for installed slots of the same package
# that differ from the highest visible match's slot, optionally
# filtering out slots that would cause blocker conflicts.
2799 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2801 Return a list of slot atoms corresponding to installed slots that
2802 differ from the slot of the highest visible match. When
2803 blocker_lookahead is True, slot atoms that would trigger a blocker
2804 conflict are automatically discarded, potentially allowing automatic
2805 uninstallation of older slots when appropriate.
2807 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2808 if highest_pkg is None:
2810 vardb = root_config.trees["vartree"].dbapi
# Collect SLOT values of installed instances matching the atom.
2812 for cpv in vardb.match(atom):
2813 # don't mix new virtuals with old virtuals
2814 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2815 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2817 slots.add(highest_pkg.metadata["SLOT"])
# The highest match's own slot is excluded from greedy candidates.
2821 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep a selectable package older than
# the highest match.
2824 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2825 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2826 if pkg is not None and \
2827 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2828 greedy_pkgs.append(pkg)
2831 if not blocker_lookahead:
2832 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: compute each candidate's blocker atoms so that
# mutually-blocking selections can be discarded below.
2835 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2836 for pkg in greedy_pkgs + [highest_pkg]:
2837 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2839 selected_atoms = self._select_atoms(
2840 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2841 parent=pkg, strict=True)
2842 except portage.exception.InvalidDependString:
2845 for atoms in selected_atoms.values():
2846 blocker_atoms.extend(x for x in atoms if x.blocker)
2847 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2849 if highest_pkg not in blockers:
2852 # filter packages with invalid deps
2853 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2855 # filter packages that conflict with highest_pkg
2856 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2857 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2858 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2863 # If two packages conflict, discard the lower version.
2864 discard_pkgs = set()
2865 greedy_pkgs.sort(reverse=True)
2866 for i in range(len(greedy_pkgs) - 1):
2867 pkg1 = greedy_pkgs[i]
2868 if pkg1 in discard_pkgs:
2870 for j in range(i + 1, len(greedy_pkgs)):
2871 pkg2 = greedy_pkgs[j]
2872 if pkg2 in discard_pkgs:
2874 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2875 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2877 discard_pkgs.add(pkg2)
2879 return [pkg.slot_atom for pkg in greedy_pkgs \
2880 if pkg not in discard_pkgs]
2882 def _select_atoms_from_graph(self, *pargs, **kwargs):
2884 Prefer atoms matching packages that have already been
2885 added to the graph or those that are installed and have
2886 not been scheduled for replacement.
2888 kwargs["trees"] = self._dynamic_config._graph_trees
2889 return self._select_atoms_highest_available(*pargs, **kwargs)
2891 def _select_atoms_highest_available(self, root, depstring,
2892 myuse=None, parent=None, strict=True, trees=None, priority=None):
2893 """This will raise InvalidDependString if necessary. If trees is
2894 None then self._dynamic_config._filtered_trees is used."""
2896 if not isinstance(depstring, list):
2898 is_valid_flag = None
2899 if parent is not None:
2900 eapi = parent.metadata['EAPI']
2901 if not parent.installed:
2902 is_valid_flag = parent.iuse.is_valid_flag
2903 depstring = portage.dep.use_reduce(depstring,
2904 uselist=myuse, opconvert=True, token_class=Atom,
2905 is_valid_flag=is_valid_flag, eapi=eapi)
2907 if (self._dynamic_config.myparams.get(
2908 "ignore_built_slot_operator_deps", "n") == "y" and
2909 parent and parent.built):
2910 ignore_built_slot_operator_deps(depstring)
2912 pkgsettings = self._frozen_config.pkgsettings[root]
2914 trees = self._dynamic_config._filtered_trees
2915 mytrees = trees[root]
2916 atom_graph = digraph()
2918 # Temporarily disable autounmask so that || preferences
2919 # account for masking and USE settings.
2920 _autounmask_backup = self._dynamic_config._autounmask
2921 self._dynamic_config._autounmask = False
2922 # backup state for restoration, in case of recursive
2923 # calls to this method
2924 backup_state = mytrees.copy()
2926 # clear state from previous call, in case this
2927 # call is recursive (we have a backup, that we
2928 # will use to restore it later)
2929 mytrees.pop("pkg_use_enabled", None)
2930 mytrees.pop("parent", None)
2931 mytrees.pop("atom_graph", None)
2932 mytrees.pop("priority", None)
2934 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2935 if parent is not None:
2936 mytrees["parent"] = parent
2937 mytrees["atom_graph"] = atom_graph
2938 if priority is not None:
2939 mytrees["priority"] = priority
2941 mycheck = portage.dep_check(depstring, None,
2942 pkgsettings, myuse=myuse,
2943 myroot=root, trees=trees)
2946 self._dynamic_config._autounmask = _autounmask_backup
2947 mytrees.pop("pkg_use_enabled", None)
2948 mytrees.pop("parent", None)
2949 mytrees.pop("atom_graph", None)
2950 mytrees.pop("priority", None)
2951 mytrees.update(backup_state)
2953 raise portage.exception.InvalidDependString(mycheck[1])
2955 selected_atoms = mycheck[1]
2956 elif parent not in atom_graph:
2957 selected_atoms = {parent : mycheck[1]}
2959 # Recursively traversed virtual dependencies, and their
2960 # direct dependencies, are considered to have the same
2961 # depth as direct dependencies.
2962 if parent.depth is None:
2965 virt_depth = parent.depth + 1
2966 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2967 selected_atoms = OrderedDict()
2968 node_stack = [(parent, None, None)]
2969 traversed_nodes = set()
2971 node, node_parent, parent_atom = node_stack.pop()
2972 traversed_nodes.add(node)
2976 if node_parent is parent:
2977 if priority is None:
2978 node_priority = None
2980 node_priority = priority.copy()
2982 # virtuals only have runtime deps
2983 node_priority = self._priority(runtime=True)
2985 k = Dependency(atom=parent_atom,
2986 blocker=parent_atom.blocker, child=node,
2987 depth=virt_depth, parent=node_parent,
2988 priority=node_priority, root=node.root)
2991 selected_atoms[k] = child_atoms
2992 for atom_node in atom_graph.child_nodes(node):
2993 child_atom = atom_node[0]
2994 if id(child_atom) not in chosen_atom_ids:
2996 child_atoms.append(child_atom)
2997 for child_node in atom_graph.child_nodes(atom_node):
2998 if child_node in traversed_nodes:
3000 if not portage.match_from_list(
3001 child_atom, [child_node]):
3002 # Typically this means that the atom
3003 # specifies USE deps that are unsatisfied
3004 # by the selected package. The caller will
3005 # record this as an unsatisfied dependency
3008 node_stack.append((child_node, node, child_atom))
3010 return selected_atoms
3012 def _expand_virt_from_graph(self, root, atom):
3013 if not isinstance(atom, Atom):
3015 graphdb = self._dynamic_config.mydbapi[root]
3016 match = graphdb.match_pkgs(atom)
3021 if not pkg.cpv.startswith("virtual/"):
3025 rdepend = self._select_atoms_from_graph(
3026 pkg.root, pkg.metadata.get("RDEPEND", ""),
3027 myuse=self._pkg_use_enabled(pkg),
3028 parent=pkg, strict=False)
3029 except InvalidDependString as e:
3030 writemsg_level("!!! Invalid RDEPEND in " + \
3031 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3032 (pkg.root, pkg.cpv, e),
3033 noiselevel=-1, level=logging.ERROR)
3037 for atoms in rdepend.values():
3039 if hasattr(atom, "_orig_atom"):
3040 # Ignore virtual atoms since we're only
3041 # interested in expanding the real atoms.
3045 def _virt_deps_visible(self, pkg, ignore_use=False):
3047 Assumes pkg is a virtual package. Traverses virtual deps recursively
3048 and returns True if all deps are visible, False otherwise. This is
3049 useful for checking if it will be necessary to expand virtual slots,
3050 for cases like bug #382557.
3053 rdepend = self._select_atoms(
3054 pkg.root, pkg.metadata.get("RDEPEND", ""),
3055 myuse=self._pkg_use_enabled(pkg),
3056 parent=pkg, priority=self._priority(runtime=True))
3057 except InvalidDependString as e:
3058 if not pkg.installed:
3060 writemsg_level("!!! Invalid RDEPEND in " + \
3061 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3062 (pkg.root, pkg.cpv, e),
3063 noiselevel=-1, level=logging.ERROR)
3066 for atoms in rdepend.values():
3069 atom = atom.without_use
3070 pkg, existing = self._select_package(
3072 if pkg is None or not self._pkg_visibility_check(pkg):
3077 def _get_dep_chain(self, start_node, target_atom=None,
3078 unsatisfied_dependency=False):
3080 Returns a list of (atom, node_type) pairs that represent a dep chain.
3081 If target_atom is None, the first package shown is pkg's parent.
3082 If target_atom is not None the first package shown is pkg.
3083 If unsatisfied_dependency is True, the first parent is select who's
3084 dependency is not satisfied by 'pkg'. This is need for USE changes.
3085 (Does not support target_atom.)
3087 traversed_nodes = set()
3091 all_parents = self._dynamic_config._parent_atoms
3092 graph = self._dynamic_config.digraph
3094 if target_atom is not None and isinstance(node, Package):
3095 affecting_use = set()
3096 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
3098 affecting_use.update(extract_affecting_use(
3099 node.metadata[dep_str], target_atom,
3100 eapi=node.metadata["EAPI"]))
3101 except InvalidDependString:
3102 if not node.installed:
3104 affecting_use.difference_update(node.use.mask, node.use.force)
3105 pkg_name = _unicode_decode("%s") % (node.cpv,)
3108 for flag in affecting_use:
3109 if flag in self._pkg_use_enabled(node):
3112 usedep.append("-"+flag)
3113 pkg_name += "[%s]" % ",".join(usedep)
3115 dep_chain.append((pkg_name, node.type_name))
3118 # To build a dep chain for the given package we take
3119 # "random" parents form the digraph, except for the
3120 # first package, because we want a parent that forced
3121 # the corresponding change (i.e '>=foo-2', instead 'foo').
3123 traversed_nodes.add(start_node)
3125 start_node_parent_atoms = {}
3126 for ppkg, patom in all_parents.get(node, []):
3127 # Get a list of suitable atoms. For use deps
3128 # (aka unsatisfied_dependency is not None) we
3129 # need that the start_node doesn't match the atom.
3130 if not unsatisfied_dependency or \
3131 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
3132 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
3134 if start_node_parent_atoms:
3135 # If there are parents in all_parents then use one of them.
3136 # If not, then this package got pulled in by an Arg and
3137 # will be correctly handled by the code that handles later
3138 # packages in the dep chain.
3139 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
3142 for ppkg in start_node_parent_atoms[best_match]:
3144 if ppkg in self._dynamic_config._initial_arg_list:
3145 # Stop if reached the top level of the dep chain.
3148 while node is not None:
3149 traversed_nodes.add(node)
3151 if node not in graph:
3152 # The parent is not in the graph due to backtracking.
3155 elif isinstance(node, DependencyArg):
3156 if graph.parent_nodes(node):
3159 node_type = "argument"
3160 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
3162 elif node is not start_node:
3163 for ppkg, patom in all_parents[child]:
3165 if child is start_node and unsatisfied_dependency and \
3166 InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
3167 # This atom is satisfied by child, there must be another atom.
3169 atom = patom.unevaluated_atom
3173 priorities = graph.nodes[node][0].get(child)
3174 if priorities is None:
3175 # This edge comes from _parent_atoms and was not added to
3176 # the graph, and _parent_atoms does not contain priorities.
3177 dep_strings.add(node.metadata["DEPEND"])
3178 dep_strings.add(node.metadata["RDEPEND"])
3179 dep_strings.add(node.metadata["PDEPEND"])
3181 for priority in priorities:
3182 if priority.buildtime:
3183 dep_strings.add(node.metadata["DEPEND"])
3184 if priority.runtime:
3185 dep_strings.add(node.metadata["RDEPEND"])
3186 if priority.runtime_post:
3187 dep_strings.add(node.metadata["PDEPEND"])
3189 affecting_use = set()
3190 for dep_str in dep_strings:
3192 affecting_use.update(extract_affecting_use(
3193 dep_str, atom, eapi=node.metadata["EAPI"]))
3194 except InvalidDependString:
3195 if not node.installed:
3198 #Don't show flags as 'affecting' if the user can't change them,
3199 affecting_use.difference_update(node.use.mask, \
3202 pkg_name = _unicode_decode("%s") % (node.cpv,)
3205 for flag in affecting_use:
3206 if flag in self._pkg_use_enabled(node):
3209 usedep.append("-"+flag)
3210 pkg_name += "[%s]" % ",".join(usedep)
3212 dep_chain.append((pkg_name, node.type_name))
3214 # When traversing to parents, prefer arguments over packages
3215 # since arguments are root nodes. Never traverse the same
3216 # package twice, in order to prevent an infinite loop.
3218 selected_parent = None
3221 parent_unsatisfied = None
3223 for parent in self._dynamic_config.digraph.parent_nodes(node):
3224 if parent in traversed_nodes:
3226 if isinstance(parent, DependencyArg):
3229 if isinstance(parent, Package) and \
3230 parent.operation == "merge":
3231 parent_merge = parent
3232 if unsatisfied_dependency and node is start_node:
3233 # Make sure that pkg doesn't satisfy parent's dependency.
3234 # This ensures that we select the correct parent for use
3236 for ppkg, atom in all_parents[start_node]:
3238 atom_set = InternalPackageSet(initial_atoms=(atom,))
3239 if not atom_set.findAtomForPackage(start_node):
3240 parent_unsatisfied = parent
3243 selected_parent = parent
3245 if parent_unsatisfied is not None:
3246 selected_parent = parent_unsatisfied
3247 elif parent_merge is not None:
3248 # Prefer parent in the merge list (bug #354747).
3249 selected_parent = parent_merge
3250 elif parent_arg is not None:
3251 if self._dynamic_config.digraph.parent_nodes(parent_arg):
3252 selected_parent = parent_arg
3255 (_unicode_decode("%s") % (parent_arg,), "argument"))
3256 selected_parent = None
3258 node = selected_parent
3261 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
3262 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
3264 for node, node_type in dep_chain:
3265 if node_type == "argument":
3266 display_list.append("required by %s (argument)" % node)
3268 display_list.append("required by %s" % node)
3270 msg = "#" + ", ".join(display_list) + "\n"
3274 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
3275 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
3277 When check_backtrack=True, no output is produced and
3278 the method either returns or raises _backtrack_mask if
3279 a matching package has been masked by backtracking.
3281 backtrack_mask = False
3282 autounmask_broke_use_dep = False
3283 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
3285 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
3287 xinfo = '"%s"' % atom.unevaluated_atom
3290 if isinstance(myparent, AtomArg):
3291 xinfo = _unicode_decode('"%s"') % (myparent,)
3292 # Discard null/ from failed cpv_expand category expansion.
3293 xinfo = xinfo.replace("null/", "")
3294 if root != self._frozen_config._running_root.root:
3295 xinfo = "%s for %s" % (xinfo, root)
3296 masked_packages = []
3298 missing_use_adjustable = set()
3299 required_use_unsatisfied = []
3300 masked_pkg_instances = set()
3301 have_eapi_mask = False
3302 pkgsettings = self._frozen_config.pkgsettings[root]
3303 root_config = self._frozen_config.roots[root]
3304 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3305 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3306 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
3307 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3308 for db, pkg_type, built, installed, db_keys in dbs:
3311 if hasattr(db, "xmatch"):
3312 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
3314 cpv_list = db.match(atom.without_use)
3316 if atom.repo is None and hasattr(db, "getRepositories"):
3317 repo_list = db.getRepositories()
3319 repo_list = [atom.repo]
3323 for cpv in cpv_list:
3324 for repo in repo_list:
3325 if not db.cpv_exists(cpv, myrepo=repo):
3328 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
3329 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
3330 if metadata is not None and \
3331 portage.eapi_is_supported(metadata["EAPI"]):
3333 repo = metadata.get('repository')
3334 pkg = self._pkg(cpv, pkg_type, root_config,
3335 installed=installed, myrepo=repo)
3336 # pkg.metadata contains calculated USE for ebuilds,
3337 # required later for getMissingLicenses.
3338 metadata = pkg.metadata
3340 # Avoid doing any operations with packages that
3341 # have invalid metadata. It would be unsafe at
3342 # least because it could trigger unhandled
3343 # exceptions in places like check_required_use().
3344 masked_packages.append(
3345 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3347 if not atom_set.findAtomForPackage(pkg,
3348 modified_use=self._pkg_use_enabled(pkg)):
3350 if pkg in self._dynamic_config._runtime_pkg_mask:
3351 backtrack_reasons = \
3352 self._dynamic_config._runtime_pkg_mask[pkg]
3353 mreasons.append('backtracking: %s' % \
3354 ', '.join(sorted(backtrack_reasons)))
3355 backtrack_mask = True
3356 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3357 modified_use=self._pkg_use_enabled(pkg)):
3358 mreasons = ["exclude option"]
3360 masked_pkg_instances.add(pkg)
3361 if atom.unevaluated_atom.use:
3363 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3364 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3365 missing_use.append(pkg)
3366 if atom_set_with_use.findAtomForPackage(pkg):
3367 autounmask_broke_use_dep = True
3371 writemsg("violated_conditionals raised " + \
3372 "InvalidAtom: '%s' parent: %s" % \
3373 (atom, myparent), noiselevel=-1)
3375 if not mreasons and \
3377 pkg.metadata.get("REQUIRED_USE") and \
3378 eapi_has_required_use(pkg.metadata["EAPI"]):
3379 if not check_required_use(
3380 pkg.metadata["REQUIRED_USE"],
3381 self._pkg_use_enabled(pkg),
3382 pkg.iuse.is_valid_flag,
3383 eapi=pkg.metadata["EAPI"]):
3384 required_use_unsatisfied.append(pkg)
3386 root_slot = (pkg.root, pkg.slot_atom)
3387 if pkg.built and root_slot in self._rebuild.rebuild_list:
3388 mreasons = ["need to rebuild from source"]
3389 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3390 mreasons = ["need to rebuild from source"]
3391 elif pkg.built and not mreasons:
3392 mreasons = ["use flag configuration mismatch"]
3393 masked_packages.append(
3394 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3398 raise self._backtrack_mask()
3402 if check_autounmask_breakage:
3403 if autounmask_broke_use_dep:
3404 raise self._autounmask_breakage()
3408 missing_use_reasons = []
3409 missing_iuse_reasons = []
3410 for pkg in missing_use:
3411 use = self._pkg_use_enabled(pkg)
3413 #Use the unevaluated atom here, because some flags might have gone
3414 #lost during evaluation.
3415 required_flags = atom.unevaluated_atom.use.required
3416 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3420 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3421 missing_iuse_reasons.append((pkg, mreasons))
3423 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3424 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3426 untouchable_flags = \
3427 frozenset(chain(pkg.use.mask, pkg.use.force))
3428 if any(x in untouchable_flags for x in
3429 chain(need_enable, need_disable)):
3432 missing_use_adjustable.add(pkg)
3433 required_use = pkg.metadata.get("REQUIRED_USE")
3434 required_use_warning = ""
3436 old_use = self._pkg_use_enabled(pkg)
3437 new_use = set(self._pkg_use_enabled(pkg))
3438 for flag in need_enable:
3440 for flag in need_disable:
3441 new_use.discard(flag)
3442 if check_required_use(required_use, old_use,
3443 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) \
3444 and not check_required_use(required_use, new_use,
3445 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
3446 required_use_warning = ", this change violates use flag constraints " + \
3447 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3449 if need_enable or need_disable:
3451 changes.extend(colorize("red", "+" + x) \
3452 for x in need_enable)
3453 changes.extend(colorize("blue", "-" + x) \
3454 for x in need_disable)
3455 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3456 missing_use_reasons.append((pkg, mreasons))
3458 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3459 # Lets see if the violated use deps are conditional.
3460 # If so, suggest to change them on the parent.
3462 # If the child package is masked then a change to
3463 # parent USE is not a valid solution (a normal mask
3464 # message should be displayed instead).
3465 if pkg in masked_pkg_instances:
3469 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3470 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3471 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3472 #all violated use deps are conditional
3474 conditional = violated_atom.use.conditional
3475 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3476 conditional.enabled, conditional.disabled))
3478 untouchable_flags = \
3479 frozenset(chain(myparent.use.mask, myparent.use.force))
3480 if any(x in untouchable_flags for x in involved_flags):
3483 required_use = myparent.metadata.get("REQUIRED_USE")
3484 required_use_warning = ""
3486 old_use = self._pkg_use_enabled(myparent)
3487 new_use = set(self._pkg_use_enabled(myparent))
3488 for flag in involved_flags:
3490 new_use.discard(flag)
3493 if check_required_use(required_use, old_use,
3494 myparent.iuse.is_valid_flag,
3495 eapi=myparent.metadata["EAPI"]) and \
3496 not check_required_use(required_use, new_use,
3497 myparent.iuse.is_valid_flag,
3498 eapi=myparent.metadata["EAPI"]):
3499 required_use_warning = ", this change violates use flag constraints " + \
3500 "defined by %s: '%s'" % (myparent.cpv, \
3501 human_readable_required_use(required_use))
3503 for flag in involved_flags:
3504 if flag in self._pkg_use_enabled(myparent):
3505 changes.append(colorize("blue", "-" + flag))
3507 changes.append(colorize("red", "+" + flag))
3508 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3509 if (myparent, mreasons) not in missing_use_reasons:
3510 missing_use_reasons.append((myparent, mreasons))
3512 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3513 in missing_use_reasons if pkg not in masked_pkg_instances]
3515 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3516 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3518 show_missing_use = False
3519 if unmasked_use_reasons:
3520 # Only show the latest version.
3521 show_missing_use = []
3523 parent_reason = None
3524 for pkg, mreasons in unmasked_use_reasons:
3526 if parent_reason is None:
3527 #This happens if a use change on the parent
3528 #leads to a satisfied conditional use dep.
3529 parent_reason = (pkg, mreasons)
3530 elif pkg_reason is None:
3531 #Don't rely on the first pkg in unmasked_use_reasons,
3532 #being the highest version of the dependency.
3533 pkg_reason = (pkg, mreasons)
3535 show_missing_use.append(pkg_reason)
3537 show_missing_use.append(parent_reason)
3539 elif unmasked_iuse_reasons:
3540 masked_with_iuse = False
3541 for pkg in masked_pkg_instances:
3542 #Use atom.unevaluated here, because some flags might have gone
3543 #lost during evaluation.
3544 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3545 # Package(s) with required IUSE are masked,
3546 # so display a normal masking message.
3547 masked_with_iuse = True
3549 if not masked_with_iuse:
3550 show_missing_use = unmasked_iuse_reasons
3552 if required_use_unsatisfied:
3553 # If there's a higher unmasked version in missing_use_adjustable
3554 # then we want to show that instead.
3555 for pkg in missing_use_adjustable:
3556 if pkg not in masked_pkg_instances and \
3557 pkg > required_use_unsatisfied[0]:
3558 required_use_unsatisfied = False
3563 if show_req_use is None and required_use_unsatisfied:
3564 # We have an unmasked package that only requires USE adjustment
3565 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3566 # that the user wants the latest version, so only the first
3567 # instance is displayed.
3568 show_req_use = required_use_unsatisfied[0]
3570 if show_req_use is not None:
3573 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3574 writemsg("\n!!! " + \
3575 colorize("BAD", "The ebuild selected to satisfy ") + \
3576 colorize("INFORM", xinfo) + \
3577 colorize("BAD", " has unmet requirements.") + "\n",
3579 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3580 writemsg("- %s %s\n" % (output_cpv, use_display),
3582 writemsg("\n The following REQUIRED_USE flag constraints " + \
3583 "are unsatisfied:\n", noiselevel=-1)
3584 reduced_noise = check_required_use(
3585 pkg.metadata["REQUIRED_USE"],
3586 self._pkg_use_enabled(pkg),
3587 pkg.iuse.is_valid_flag,
3588 eapi=pkg.metadata["EAPI"]).tounicode()
3589 writemsg(" %s\n" % \
3590 human_readable_required_use(reduced_noise),
3592 normalized_required_use = \
3593 " ".join(pkg.metadata["REQUIRED_USE"].split())
3594 if reduced_noise != normalized_required_use:
3595 writemsg("\n The above constraints " + \
3596 "are a subset of the following complete expression:\n",
3598 writemsg(" %s\n" % \
3599 human_readable_required_use(normalized_required_use),
3601 writemsg("\n", noiselevel=-1)
3603 elif show_missing_use:
3604 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3605 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3606 for pkg, mreasons in show_missing_use:
3607 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3609 elif masked_packages:
3610 writemsg("\n!!! " + \
3611 colorize("BAD", "All ebuilds that could satisfy ") + \
3612 colorize("INFORM", xinfo) + \
3613 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3614 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3615 have_eapi_mask = show_masked_packages(masked_packages)
3617 writemsg("\n", noiselevel=-1)
3618 msg = ("The current version of portage supports " + \
3619 "EAPI '%s'. You must upgrade to a newer version" + \
3620 " of portage before EAPI masked packages can" + \
3621 " be installed.") % portage.const.EAPI
3622 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3623 writemsg("\n", noiselevel=-1)
3627 if not atom.cp.startswith("null/"):
3628 for pkg in self._iter_match_pkgs_any(
3629 root_config, Atom(atom.cp)):
3633 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3634 if isinstance(myparent, AtomArg) and \
3636 self._frozen_config.myopts.get(
3637 "--misspell-suggestions", "y") != "n":
3638 cp = myparent.atom.cp.lower()
3639 cat, pkg = portage.catsplit(cp)
3643 writemsg("\nemerge: searching for similar names..."
3647 all_cp.update(vardb.cp_all())
3648 if "--usepkgonly" not in self._frozen_config.myopts:
3649 all_cp.update(portdb.cp_all())
3650 if "--usepkg" in self._frozen_config.myopts:
3651 all_cp.update(bindb.cp_all())
3652 # discard dir containing no ebuilds
3656 for cp_orig in all_cp:
3657 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3658 all_cp = set(orig_cp_map)
3661 matches = difflib.get_close_matches(cp, all_cp)
3664 for other_cp in list(all_cp):
3665 other_pkg = portage.catsplit(other_cp)[1]
3666 if other_pkg == pkg:
3667 # Check for non-identical package that
3668 # differs only by upper/lower case.
3670 for cp_orig in orig_cp_map[other_cp]:
3671 if portage.catsplit(cp_orig)[1] != \
3672 portage.catsplit(atom.cp)[1]:
3676 # discard dir containing no ebuilds
3677 all_cp.discard(other_cp)
3679 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3680 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3682 for pkg_match in pkg_matches:
3683 matches.extend(pkg_to_cp[pkg_match])
3685 matches_orig_case = []
3687 matches_orig_case.extend(orig_cp_map[cp])
3688 matches = matches_orig_case
3690 if len(matches) == 1:
3691 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
3693 elif len(matches) > 1:
3695 "\nemerge: Maybe you meant any of these: %s?\n" % \
3696 (", ".join(matches),), noiselevel=-1)
3698 # Generally, this would only happen if
3699 # all dbapis are empty.
3700 writemsg(" nothing similar found.\n"
3703 if not isinstance(myparent, AtomArg):
3704 # It's redundant to show parent for AtomArg since
3705 # it's the same as 'xinfo' displayed above.
3706 dep_chain = self._get_dep_chain(myparent, atom)
3707 for node, node_type in dep_chain:
3708 msg.append('(dependency required by "%s" [%s])' % \
3709 (colorize('INFORM', _unicode_decode("%s") % \
3710 (node)), node_type))
3713 writemsg("\n".join(msg), noiselevel=-1)
3714 writemsg("\n", noiselevel=-1)
3718 writemsg("\n", noiselevel=-1)
3720 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
3721 for db, pkg_type, built, installed, db_keys in \
3722 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3723 for pkg in self._iter_match_pkgs(root_config,
3724 pkg_type, atom, onlydeps=onlydeps):
3727 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
3729 Iterate over Package instances of pkg_type matching the given atom.
3730 This does not check visibility and it also does not match USE for
3731 unbuilt ebuilds since USE are lazily calculated after visibility
3732 checks (to avoid the expense when possible).
3735 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3736 atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
3737 cp_list = db.cp_list(atom_exp.cp)
3738 matched_something = False
3739 installed = pkg_type == 'installed'
3742 atom_set = InternalPackageSet(initial_atoms=(atom,),
3744 if atom.repo is None and hasattr(db, "getRepositories"):
3745 repo_list = db.getRepositories()
3747 repo_list = [atom.repo]
3752 # Call match_from_list on one cpv at a time, in order
3753 # to avoid unnecessary match_from_list comparisons on
3754 # versions that are never yielded from this method.
3755 if not match_from_list(atom_exp, [cpv]):
3757 for repo in repo_list:
3760 pkg = self._pkg(cpv, pkg_type, root_config,
3761 installed=installed, onlydeps=onlydeps, myrepo=repo)
3762 except portage.exception.PackageNotFound:
3765 # A cpv can be returned from dbapi.match() as an
3766 # old-style virtual match even in cases when the
3767 # package does not actually PROVIDE the virtual.
3768 # Filter out any such false matches here.
3770 # Make sure that cpv from the current repo satisfies the atom.
3771 # This might not be the case if there are several repos with
3772 # the same cpv, but different metadata keys, like SLOT.
3773 # Also, parts of the match that require metadata access
3774 # are deferred until we have cached the metadata in a
3776 if not atom_set.findAtomForPackage(pkg,
3777 modified_use=self._pkg_use_enabled(pkg)):
3779 matched_something = True
3782 # USE=multislot can make an installed package appear as if
3783 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3784 # won't do any good as long as USE=multislot is enabled since
3785 # the newly built package still won't have the expected slot.
3786 # Therefore, assume that such SLOT dependencies are already
3787 # satisfied rather than forcing a rebuild.
3788 if not matched_something and installed and atom.slot is not None:
3790 if "remove" in self._dynamic_config.myparams:
3791 # We need to search the portdbapi, which is not in our
3792 # normal dbs list, in order to find the real SLOT.
3793 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
3794 db_keys = list(portdb._aux_cache_keys)
3795 dbs = [(portdb, "ebuild", False, False, db_keys)]
3797 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
3799 cp_list = db.cp_list(atom_exp.cp)
3801 atom_set = InternalPackageSet(
3802 initial_atoms=(atom.without_slot,), allow_repo=True)
3803 atom_exp_without_slot = atom_exp.without_slot
3806 if not match_from_list(atom_exp_without_slot, [cpv]):
3808 slot_available = False
3809 for other_db, other_type, other_built, \
3810 other_installed, other_keys in dbs:
3813 other_db.aux_get(cpv, ["SLOT"])[0]:
3814 slot_available = True
3818 if not slot_available:
3820 inst_pkg = self._pkg(cpv, "installed",
3821 root_config, installed=installed, myrepo=atom.repo)
3822 # Remove the slot from the atom and verify that
3823 # the package matches the resulting atom.
3824 if atom_set.findAtomForPackage(inst_pkg):
3828 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3829 cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
3830 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
3833 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3834 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3837 if self._pkg_visibility_check(pkg) and \
3838 not (pkg.installed and pkg.masks):
3839 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
3842 def _want_installed_pkg(self, pkg):
3844 Given an installed package returned from select_pkg, return
3845 True if the user has not explicitly requested for this package
3846 to be replaced (typically via an atom on the command line).
3848 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
3849 modified_use=self._pkg_use_enabled(pkg)):
3854 for arg, atom in self._iter_atoms_for_pkg(pkg):
3855 if arg.force_reinstall:
3857 except InvalidDependString:
3860 if "selective" in self._dynamic_config.myparams:
3865 def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
3868 pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
3869 except portage.exception.PackageNotFound:
3870 pkg_eb_visible = False
3871 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3872 "ebuild", Atom("=%s" % (pkg.cpv,))):
3873 if self._pkg_visibility_check(pkg_eb, autounmask_level):
3874 pkg_eb_visible = True
3876 if not pkg_eb_visible:
3879 if not self._pkg_visibility_check(pkg_eb, autounmask_level):
3884 def _equiv_binary_installed(self, pkg):
3885 build_time = pkg.metadata.get('BUILD_TIME')
3890 inst_pkg = self._pkg(pkg.cpv, "installed",
3891 pkg.root_config, installed=True)
3892 except PackageNotFound:
3895 return build_time == inst_pkg.metadata.get('BUILD_TIME')
3897 class _AutounmaskLevel(object):
3898 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
3899 "allow_missing_keywords", "allow_unmasks")
3902 self.allow_use_changes = False
3903 self.allow_license_changes = False
3904 self.allow_unstable_keywords = False
3905 self.allow_missing_keywords = False
3906 self.allow_unmasks = False
# NOTE(review): line-numbered extract with elided lines (docstring quotes,
# an early return, the "1. USE" item, break statements); code kept verbatim.
#
# _autounmask_levels: generator yielding successively more invasive
# _AutounmaskLevel instances, per the ordering described in its own
# docstring fragment below. Note it yields the SAME mutated instance each
# time -- callers presumably must not retain earlier yields (TODO confirm).
3908 def _autounmask_levels(self):
3910 Iterate over the different allowed things to unmask.
3914 2. USE + ~arch + license
3915 3. USE + ~arch + license + missing keywords
3916 4. USE + ~arch + license + masks
3917 5. USE + ~arch + license + missing keywords + masks
3920 * Do least invasive changes first.
3921 * Try unmasking alone before unmasking + missing keywords
3922 to avoid -9999 versions if possible
# Generator produces nothing unless autounmask is fully enabled
# (the body of this guard, orig. 3926, is elided -- presumably `return`).
3925 if self._dynamic_config._autounmask is not True:
# --autounmask-keep-masks suppresses the mask/missing-keyword levels below.
3928 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
3929 autounmask_level = self._AutounmaskLevel()
# Level: USE changes only.
3931 autounmask_level.allow_use_changes = True
3932 yield autounmask_level
# Level: USE + license.
3934 autounmask_level.allow_license_changes = True
3935 yield autounmask_level
# Escalation loop; with only_use_changes fixed to False this always enables
# unstable keywords + license before trying masks/missing keywords.
3937 for only_use_changes in (False,):
3939 autounmask_level.allow_unstable_keywords = (not only_use_changes)
3940 autounmask_level.allow_license_changes = (not only_use_changes)
3942 for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
# Skip mask-touching combinations when masks must be kept.
3944 if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
3947 autounmask_level.allow_missing_keywords = missing_keyword
3948 autounmask_level.allow_unmasks = unmask
3950 yield autounmask_level
# NOTE(review): line-numbered extract with elided lines; code kept verbatim.
#
# _select_pkg_highest_available_imp: wraps
# _wrapped_select_pkg_highest_available_imp, retrying with progressively
# more permissive autounmask levels when the default selection is
# unsatisfactory. Returns a (pkg, existing_node) pair.
3953 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3954 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the non-autounmask result so it can be restored below.
3956 default_selection = (pkg, existing)
# Condition continues on an elided line (orig. 3960) -- the retry appears to
# trigger for an installed pkg the user wants replaced; TODO confirm.
3959 if pkg is not None and \
3961 not self._want_installed_pkg(pkg):
3964 if self._dynamic_config._autounmask is True:
# Try each escalation level until something is selected (loop-exit logic
# at the elided lines orig. 3968-3971/3975-3981).
3967 for autounmask_level in self._autounmask_levels():
3972 self._wrapped_select_pkg_highest_available_imp(
3973 root, atom, onlydeps=onlydeps,
3974 autounmask_level=autounmask_level)
3978 if self._dynamic_config._need_restart:
3982 # This ensures that we can fall back to an installed package
3983 # that may have been rejected in the autounmask path above.
3984 return default_selection
3986 return pkg, existing
# NOTE(review): line-numbered extract with elided lines (docstring, several
# `return True`/`return False` lines); code kept verbatim, comments only.
#
# _pkg_visibility_check: decides whether *pkg* may be used, optionally
# recording the autounmask changes (keyword/mask/license) that would make a
# masked package usable at the given autounmask_level. Side effects: updates
# _needed_unstable_keywords / _needed_p_mask_changes /
# _needed_license_changes and _backtrack_infos["config"].
3988 def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
3993 if trust_graph and pkg in self._dynamic_config.digraph:
3994 # Sometimes we need to temporarily disable
3995 # dynamic_config._autounmask, but for overall
3996 # consistency in dependency resolution, in most
3997 # cases we want to treat packages in the graph
3998 # as though they are visible.
# Without autounmask (or without a level) the plain visibility result
# (computed on the elided lines around orig. 3990-4002) stands.
4001 if not self._dynamic_config._autounmask or autounmask_level is None:
4004 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
4005 root_config = self._frozen_config.roots[pkg.root]
4006 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each mask reason via its unmask_hint.
4008 masked_by_unstable_keywords = False
4009 masked_by_missing_keywords = False
4010 missing_licenses = None
4011 masked_by_something_else = False
4012 masked_by_p_mask = False
4014 for reason in mreasons:
4015 hint = reason.unmask_hint
# The hint-is-None branch head (orig. 4016-4017) is elided.
4018 masked_by_something_else = True
4019 elif hint.key == "unstable keyword":
4020 masked_by_unstable_keywords = True
# "**" means the package has no keywords at all for this arch.
4021 if hint.value == "**":
4022 masked_by_missing_keywords = True
4023 elif hint.key == "p_mask":
4024 masked_by_p_mask = True
4025 elif hint.key == "license":
4026 missing_licenses = hint.value
4028 masked_by_something_else = True
# Autounmask cannot help with arbitrary masks (elided return).
4030 if masked_by_something_else:
4033 if pkg in self._dynamic_config._needed_unstable_keywords:
4034 #If the package is already keyworded, remove the mask.
4035 masked_by_unstable_keywords = False
4036 masked_by_missing_keywords = False
4038 if pkg in self._dynamic_config._needed_p_mask_changes:
4039 #If the package is already keyworded, remove the mask.
4040 masked_by_p_mask = False
4042 if missing_licenses:
4043 #If the needed licenses are already unmasked, remove the mask.
4044 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
4046 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
4047 #Package has already been unmasked.
# Reject when the required change category is not allowed at this level.
4050 if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
4051 (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
4052 (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
4053 (missing_licenses and not autounmask_level.allow_license_changes):
4054 #We are not allowed to do the needed changes.
# Record the needed changes, both in the dynamic config and in the
# backtracker's "config" info so they survive a backtracking restart.
4057 if masked_by_unstable_keywords:
4058 self._dynamic_config._needed_unstable_keywords.add(pkg)
4059 backtrack_infos = self._dynamic_config._backtrack_infos
4060 backtrack_infos.setdefault("config", {})
4061 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
4062 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
4064 if masked_by_p_mask:
4065 self._dynamic_config._needed_p_mask_changes.add(pkg)
4066 backtrack_infos = self._dynamic_config._backtrack_infos
4067 backtrack_infos.setdefault("config", {})
4068 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
4069 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
4071 if missing_licenses:
4072 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
4073 backtrack_infos = self._dynamic_config._backtrack_infos
4074 backtrack_infos.setdefault("config", {})
4075 backtrack_infos["config"].setdefault("needed_license_changes", set())
4076 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
# NOTE(review): line-numbered extract with elided lines (docstring quotes,
# several guards/returns, new_use initialization); code kept verbatim.
#
# _pkg_use_enabled: per its docstring fragment, returns the effective
# enabled-USE set for *pkg*, folding in any pending autounmask USE changes;
# when target_use is given, computes and records the changes needed to reach
# that configuration. Side effects: updates _needed_use_config_changes,
# _backtrack_infos and possibly _need_restart.
4080 def _pkg_use_enabled(self, pkg, target_use=None):
4082 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
4083 If target_use is given, the need changes are computed to make the package useable.
4084 Example: target_use = { "foo": True, "bar": False }
4085 The flags target_use must be in the pkg's IUSE.
# Guard (elided condition, orig. 4086-4087) short-circuits to the plain set.
4088 return pkg.use.enabled
4089 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query mode: return previously-computed adjusted USE, if any.
4091 if target_use is None:
4092 if needed_use_config_change is None:
4093 return pkg.use.enabled
# Tuple layout appears to be (adjusted_use_set, changes_dict).
4095 return needed_use_config_change[0]
4097 if needed_use_config_change is not None:
4098 old_use = needed_use_config_change[0]
4100 old_changes = needed_use_config_change[1]
4101 new_changes = old_changes.copy()
# No prior changes recorded: start from the package's own USE
# (the corresponding new_changes init, orig. ~4104-4105, is elided).
4103 old_use = pkg.use.enabled
# Merge target_use into the change map; conflicting earlier changes
# (enable-vs-disable) are handled on elided lines (orig. 4112, 4118).
4108 for flag, state in target_use.items():
4110 if flag not in old_use:
4111 if new_changes.get(flag) == False:
4113 new_changes[flag] = True
4117 if new_changes.get(flag) == True:
4119 new_changes[flag] = False
# Flags not mentioned in target_use keep their old state.
4120 new_use.update(old_use.difference(target_use))
# Helper: a USE change warrants a restart when it alters the flattened
# dep/LICENSE strings of a package already in the graph whose parents
# depend on one of the changed flags.
4122 def want_restart_for_use_change(pkg, new_use):
4123 if pkg not in self._dynamic_config.digraph.nodes:
4126 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
4127 dep = pkg.metadata[key]
4128 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4129 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
4131 if old_val != new_val:
4134 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
4135 if not parent_atoms:
4138 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
4139 for ppkg, atom in parent_atoms:
4140 if not atom.use or \
4141 not any(x in atom.use.required for x in changes):
4148 if new_changes != old_changes:
4149 #Don't do the change if it violates REQUIRED_USE.
4150 required_use = pkg.metadata.get("REQUIRED_USE")
4151 if required_use and check_required_use(required_use, old_use,
4152 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) and \
4153 not check_required_use(required_use, new_use,
4154 pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
# Masked/forced flags cannot be changed by the user (elided return).
4157 if any(x in pkg.use.mask for x in new_changes) or \
4158 any(x in pkg.use.force for x in new_changes):
# Record the accepted change set for this pkg and for the backtracker.
4161 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
4162 backtrack_infos = self._dynamic_config._backtrack_infos
4163 backtrack_infos.setdefault("config", {})
4164 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
4165 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
4166 if want_restart_for_use_change(pkg, new_use):
4167 self._dynamic_config._need_restart = True
# NOTE(review): line-numbered extract; original numbering is non-contiguous,
# so many statements (continue/break lines, some assignments such as
# atom_cp, myeb, matched_oldpkg, cp, built_pkg/inst_pkg/unbuilt_pkg inits,
# try: lines) are elided. Code kept byte-identical; comments only added.
#
# _wrapped_select_pkg_highest_available_imp: the core package-selection
# routine. Scans the configured dbs (ebuild/binary/installed) for packages
# matching *atom*, applies exclusion, visibility, USE-dep and rebuild
# filters, and returns a (selected_pkg, existing_graph_node) pair.
4170 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
4171 root_config = self._frozen_config.roots[root]
4172 pkgsettings = self._frozen_config.pkgsettings[root]
4173 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
4174 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
4175 # List of acceptable packages, ordered by type preference.
4176 matched_packages = []
4177 matched_pkgs_ignore_use = []
4178 highest_version = None
4179 if not isinstance(atom, portage.dep.Atom):
4180 atom = portage.dep.Atom(atom)
# atom_cp assignment (orig. 4181) is elided.
4182 have_new_virt = atom_cp.startswith("virtual/") and \
4183 self._have_new_virt(root, atom_cp)
4184 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
4185 existing_node = None
# Option/parameter snapshot used throughout the scan.
4187 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
4188 usepkg = "--usepkg" in self._frozen_config.myopts
4189 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
4190 empty = "empty" in self._dynamic_config.myparams
4191 selective = "selective" in self._dynamic_config.myparams
4193 avoid_update = "--update" not in self._frozen_config.myopts
4194 dont_miss_updates = "--update" in self._frozen_config.myopts
4195 use_ebuild_visibility = self._frozen_config.myopts.get(
4196 '--use-ebuild-visibility', 'n') != 'n'
4197 reinstall_atoms = self._frozen_config.reinstall_atoms
4198 usepkg_exclude = self._frozen_config.usepkg_exclude
4199 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
4201 # Behavior of the "selective" parameter depends on
4202 # whether or not a package matches an argument atom.
4203 # If an installed package provides an old-style
4204 # virtual that is no longer provided by an available
4205 # package, the installed package may match an argument
4206 # atom even though none of the available packages do.
4207 # Therefore, "selective" logic does not consider
4208 # whether or not an installed package matches an
4209 # argument atom. It only considers whether or not
4210 # available packages match argument atoms, which is
4211 # represented by the found_available_arg flag.
4212 found_available_arg = False
4213 packages_with_invalid_use_config = []
# Two passes: first prefer re-using an existing graph node, then not.
4214 for find_existing_node in True, False:
4217 for db, pkg_type, built, installed, db_keys in dbs:
4220 if installed and not find_existing_node:
4221 want_reinstall = reinstall or empty or \
4222 (found_available_arg and not selective)
4223 if want_reinstall and matched_packages:
4226 # Ignore USE deps for the initial match since we want to
4227 # ensure that updates aren't missed solely due to the user's
4228 # USE configuration.
4229 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
4231 if pkg.cp != atom_cp and have_new_virt:
4232 # pull in a new-style virtual instead
4234 if pkg in self._dynamic_config._runtime_pkg_mask:
4235 # The package has been masked by the backtracking logic
4237 root_slot = (pkg.root, pkg.slot_atom)
# Skip candidates scheduled for rebuild/reinstall (bodies elided).
4238 if pkg.built and root_slot in self._rebuild.rebuild_list:
4240 if (pkg.installed and
4241 root_slot in self._rebuild.reinstall_list):
# --exclude only applies to non-installed candidates here.
4244 if not pkg.installed and \
4245 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
4246 modified_use=self._pkg_use_enabled(pkg)):
4249 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
4250 modified_use=self._pkg_use_enabled(pkg)):
4253 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
4254 modified_use=self._pkg_use_enabled(pkg))
4256 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
4257 (not pkg.installed or dont_miss_updates):
4258 # Check if a higher version was rejected due to user
4259 # USE configuration. The packages_with_invalid_use_config
4260 # list only contains unbuilt ebuilds since USE can't
4261 # be changed for built packages.
4262 higher_version_rejected = False
4263 repo_priority = pkg.repo_priority
4264 for rejected in packages_with_invalid_use_config:
4265 if rejected.cp != pkg.cp:
4268 higher_version_rejected = True
4270 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
4271 # If version is identical then compare
4272 # repo priority (see bug #350254).
4273 rej_repo_priority = rejected.repo_priority
4274 if rej_repo_priority is not None and \
4275 (repo_priority is None or
4276 rej_repo_priority > repo_priority):
4277 higher_version_rejected = True
4279 if higher_version_rejected:
4283 reinstall_for_flags = None
4285 if not pkg.installed or \
4286 (matched_packages and not avoid_update):
4287 # Only enforce visibility on installed packages
4288 # if there is at least one other visible package
4289 # available. By filtering installed masked packages
4290 # here, packages that have been masked since they
4291 # were installed can be automatically downgraded
4292 # to an unmasked version. NOTE: This code needs to
4293 # be consistent with masking behavior inside
4294 # _dep_check_composite_db, in order to prevent
4295 # incorrect choices in || deps like bug #351828.
4297 if not self._pkg_visibility_check(pkg, autounmask_level):
4300 # Enable upgrade or downgrade to a version
4301 # with visible KEYWORDS when the installed
4302 # version is masked by KEYWORDS, but never
4303 # reinstall the same exact version only due
4304 # to a KEYWORDS mask. See bug #252167.
4306 if pkg.type_name != "ebuild" and matched_packages:
4307 # Don't re-install a binary package that is
4308 # identical to the currently installed package
4309 # (see bug #354441).
4310 identical_binary = False
4311 if usepkg and pkg.installed:
4312 for selected_pkg in matched_packages:
4313 if selected_pkg.type_name == "binary" and \
4314 selected_pkg.cpv == pkg.cpv and \
4315 selected_pkg.metadata.get('BUILD_TIME') == \
4316 pkg.metadata.get('BUILD_TIME'):
4317 identical_binary = True
4320 if not identical_binary:
4321 # If the ebuild no longer exists or it's
4322 # keywords have been dropped, reject built
4323 # instances (installed or binary).
4324 # If --usepkgonly is enabled, assume that
4325 # the ebuild status should be ignored.
4326 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
4327 if pkg.installed and pkg.masks:
4329 elif not self._equiv_ebuild_visible(pkg,
4330 autounmask_level=autounmask_level):
4333 # Calculation of USE for unbuilt ebuilds is relatively
4334 # expensive, so it is only performed lazily, after the
4335 # above visibility checks are complete.
# myarg/myarg_atom defaults and the try: (orig. ~4337-4338) are elided.
4339 for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
4340 if myarg.force_reinstall:
4343 except InvalidDependString:
4345 # masked by corruption
4347 if not installed and myarg:
4348 found_available_arg = True
4350 if atom.unevaluated_atom.use:
4351 #Make sure we don't miss a 'missing IUSE'.
4352 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
4353 # Don't add this to packages_with_invalid_use_config
4354 # since IUSE cannot be adjusted by the user.
4359 matched_pkgs_ignore_use.append(pkg)
# With USE changes allowed, compute the USE set needed to satisfy the
# atom's USE deps (target_use init, orig. 4361, is elided).
4360 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
4362 for flag in atom.use.enabled:
4363 target_use[flag] = True
4364 for flag in atom.use.disabled:
4365 target_use[flag] = False
4366 use = self._pkg_use_enabled(pkg, target_use)
4368 use = self._pkg_use_enabled(pkg)
4371 can_adjust_use = not pkg.built
4372 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
4373 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
4375 if atom.use.enabled:
4376 if any(x in atom.use.enabled for x in missing_disabled):
4378 can_adjust_use = False
4379 need_enabled = atom.use.enabled.difference(use)
4381 need_enabled = need_enabled.difference(missing_enabled)
# Flags the user cannot enable (use.mask) make adjustment impossible.
4385 if any(x in pkg.use.mask for x in need_enabled):
4386 can_adjust_use = False
4388 if atom.use.disabled:
4389 if any(x in atom.use.disabled for x in missing_enabled):
4391 can_adjust_use = False
4392 need_disabled = atom.use.disabled.intersection(use)
4394 need_disabled = need_disabled.difference(missing_disabled)
4398 if any(x in pkg.use.force and x not in
4399 pkg.use.mask for x in need_disabled):
4400 can_adjust_use = False
4404 # Above we must ensure that this package has
4405 # absolutely no use.force, use.mask, or IUSE
4406 # issues that the user typically can't make
4407 # adjustments to solve (see bug #345979).
4408 # FIXME: Conditional USE deps complicate
4409 # issues. This code currently excludes cases
4410 # in which the user can adjust the parent
4411 # package's USE in order to satisfy the dep.
4412 packages_with_invalid_use_config.append(pkg)
# Track the highest version seen for the atom's cp.
4415 if pkg.cp == atom_cp:
4416 if highest_version is None:
4417 highest_version = pkg
4418 elif pkg > highest_version:
4419 highest_version = pkg
4420 # At this point, we've found the highest visible
4421 # match from the current repo. Any lower versions
4422 # from this repo are ignored, so this so the loop
4423 # will always end with a break statement below
4425 if find_existing_node:
4426 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4430 # Use PackageSet.findAtomForPackage()
4431 # for PROVIDE support.
4432 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
4433 if highest_version and \
4434 e_pkg.cp == atom_cp and \
4435 e_pkg < highest_version and \
4436 e_pkg.slot_atom != highest_version.slot_atom:
4437 # There is a higher version available in a
4438 # different slot, so this existing node is
4442 matched_packages.append(e_pkg)
4443 existing_node = e_pkg
4445 # Compare built package to current config and
4446 # reject the built package if necessary.
4447 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4448 ("--newuse" in self._frozen_config.myopts or \
4449 "--reinstall" in self._frozen_config.myopts or \
4450 (not installed and self._dynamic_config.myparams.get(
4451 "binpkg_respect_use") in ("y", "auto"))):
4452 iuses = pkg.iuse.all
4453 old_use = self._pkg_use_enabled(pkg)
# myeb assignment (orig. ~4454) is elided; it presumably holds the
# corresponding ebuild, if any -- TODO confirm.
4455 pkgsettings.setcpv(myeb)
4457 pkgsettings.setcpv(pkg)
4458 now_use = pkgsettings["PORTAGE_USE"].split()
4459 forced_flags = set()
4460 forced_flags.update(pkgsettings.useforce)
4461 forced_flags.update(pkgsettings.usemask)
4463 if myeb and not usepkgonly and not useoldpkg:
4464 cur_iuse = myeb.iuse.all
4465 reinstall_for_flags = self._reinstall_for_flags(pkg,
4466 forced_flags, old_use, iuses, now_use, cur_iuse)
4467 if reinstall_for_flags:
4468 if not pkg.installed:
4469 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4471 # Compare current config to installed package
4472 # and do not reinstall if possible.
4473 if not installed and not useoldpkg and \
4474 ("--newuse" in self._frozen_config.myopts or \
4475 "--reinstall" in self._frozen_config.myopts) and \
4476 cpv in vardb.match(atom):
4477 forced_flags = set()
4478 forced_flags.update(pkg.use.force)
4479 forced_flags.update(pkg.use.mask)
4480 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4481 old_use = inst_pkg.use.enabled
4482 old_iuse = inst_pkg.iuse.all
4483 cur_use = self._pkg_use_enabled(pkg)
4484 cur_iuse = pkg.iuse.all
4485 reinstall_for_flags = \
4486 self._reinstall_for_flags(pkg,
4487 forced_flags, old_use, old_iuse,
4489 if reinstall_for_flags:
4491 if reinstall_atoms.findAtomForPackage(pkg, \
4492 modified_use=self._pkg_use_enabled(pkg)):
# matched_oldpkg is initialized on an elided line; it appears to collect
# candidates matched by the useoldpkg atoms -- TODO confirm.
4497 matched_oldpkg.append(pkg)
4498 matched_packages.append(pkg)
4499 if reinstall_for_flags:
4500 self._dynamic_config._reinstall_nodes[pkg] = \
# No candidate at all (elided return, presumably (None, None)).
4504 if not matched_packages:
4507 if "--debug" in self._frozen_config.myopts:
4508 for pkg in matched_packages:
4509 portage.writemsg("%s %s%s%s\n" % \
4510 ((pkg.type_name + ":").rjust(10),
4511 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4513 # Filter out any old-style virtual matches if they are
4514 # mixed with new-style virtual matches.
# cp assignment (orig. 4515) is elided.
4516 if len(matched_packages) > 1 and \
4517 "virtual" == portage.catsplit(cp)[0]:
4518 for pkg in matched_packages:
4521 # Got a new-style virtual, so filter
4522 # out any old-style virtuals.
4523 matched_packages = [pkg for pkg in matched_packages \
# Prefer a node already in the graph when it survived the filters.
4527 if existing_node is not None and \
4528 existing_node in matched_packages:
4529 return existing_node, existing_node
4531 if len(matched_packages) > 1:
4532 if rebuilt_binaries:
# built_pkg/inst_pkg/unbuilt_pkg initialization (orig. 4533-4541) is
# partially elided; the loop below appears to pick the newest of each kind.
4536 for pkg in matched_packages:
4542 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4544 if built_pkg is not None and inst_pkg is not None:
4545 # Only reinstall if binary package BUILD_TIME is
4546 # non-empty, in order to avoid cases like to
4547 # bug #306659 where BUILD_TIME fields are missing
4548 # in local and/or remote Packages file.
4550 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
4551 except (KeyError, ValueError):
4555 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
4556 except (KeyError, ValueError):
4557 installed_timestamp = 0
4559 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4561 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4562 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4563 if built_timestamp and \
4564 built_timestamp > installed_timestamp and \
4565 built_timestamp >= minimal_timestamp:
4566 return built_pkg, existing_node
4568 #Don't care if the binary has an older BUILD_TIME than the installed
4569 #package. This is for closely tracking a binhost.
4570 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4572 if built_timestamp and \
4573 built_timestamp != installed_timestamp:
4574 return built_pkg, existing_node
# Drop invalid installed entries from the candidate list.
4576 for pkg in matched_packages:
4577 if pkg.installed and pkg.invalid:
4578 matched_packages = [x for x in \
4579 matched_packages if x is not pkg]
4582 for pkg in matched_packages:
4583 if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
4584 return pkg, existing_node
4586 visible_matches = []
4588 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4589 if self._pkg_visibility_check(pkg, autounmask_level)]
4590 if not visible_matches:
4591 visible_matches = [pkg.cpv for pkg in matched_packages \
4592 if self._pkg_visibility_check(pkg, autounmask_level)]
4594 bestmatch = portage.best(visible_matches)
4596 # all are masked, so ignore visibility
4597 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4598 matched_packages = [pkg for pkg in matched_packages \
4599 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4601 # ordered by type preference ("ebuild" type is the last resort)
4602 return matched_packages[-1], existing_node
# NOTE(review): line-numbered extract with elided lines (docstring close,
# empty-match early return); code kept verbatim.
#
# _select_pkg_from_graph: alternate _select_package implementation used in
# complete-graph mode (see _complete_graph); restricts selection to packages
# already in the graph trees.
4604 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4606 Select packages that have already been added to the graph or
4607 those that are installed and have not been scheduled for
4610 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4611 matches = graph_db.match_pkgs(atom)
4614 pkg = matches[-1] # highest match
4615 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4616 return pkg, in_graph
# NOTE(review): line-numbered extract with elided lines (docstring quotes,
# empty-match return, the single-unmasked return); code kept verbatim.
#
# _select_pkg_from_installed: alternate _select_package implementation used
# for removal ("remove") operations; only considers installed packages.
4618 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4620 Select packages that are installed.
4622 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
4626 if len(matches) > 1:
4627 matches.reverse() # ascending order
# Prefer the single visibility-unmasked match when exactly one exists.
4628 unmasked = [pkg for pkg in matches if \
4629 self._pkg_visibility_check(pkg)]
4631 if len(unmasked) == 1:
4634 # Account for packages with masks (like KEYWORDS masks)
4635 # that are usually ignored in visibility checks for
4636 # installed packages, in order to handle cases like
4638 unmasked = [pkg for pkg in matches if not pkg.masks]
4641 pkg = matches[-1] # highest match
4642 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4643 return pkg, in_graph
# NOTE(review): line-numbered extract; the original numbering is
# non-contiguous, so several statements (docstring quotes, returns,
# continue/break lines, use_change init) are elided. Code kept verbatim.
#
# _complete_graph: per its docstring fragment below, pulls deep dependencies
# of the required sets into the graph so that initially-satisfied deps are
# not broken; switches the resolver into "complete" mode when an installed
# package would change.
4645 def _complete_graph(self, required_sets=None):
4647 Add any deep dependencies of required sets (args, system, world) that
4648 have not been pulled into the graph yet. This ensures that the graph
4649 is consistent such that initially satisfied deep dependencies are not
4650 broken in the new graph. Initially unsatisfied dependencies are
4651 irrelevant since we only want to avoid breaking dependencies that are
4652 initially satisfied.
4654 Since this method can consume enough time to disturb users, it is
4655 currently only enabled by the --complete-graph option.
4657 @param required_sets: contains required sets (currently only used
4658 for depclean and prune removal operations)
4659 @type required_sets: dict
# Nothing to do for --buildpkgonly or non-recursive resolution
# (the elided body is presumably an early return).
4661 if "--buildpkgonly" in self._frozen_config.myopts or \
4662 "recurse" not in self._dynamic_config.myparams:
4665 complete_if_new_use = self._dynamic_config.myparams.get(
4666 "complete_if_new_use", "y") == "y"
4667 complete_if_new_ver = self._dynamic_config.myparams.get(
4668 "complete_if_new_ver", "y") == "y"
4669 rebuild_if_new_slot = self._dynamic_config.myparams.get(
4670 "rebuild_if_new_slot", "y") == "y"
4671 complete_if_new_slot = rebuild_if_new_slot
4673 if "complete" not in self._dynamic_config.myparams and \
4674 (complete_if_new_use or
4675 complete_if_new_ver or complete_if_new_slot):
4676 # Enable complete mode if an installed package will change somehow.
# use_change init (orig. 4677) is elided; it pairs with version_change.
4678 version_change = False
4679 for node in self._dynamic_config.digraph:
4680 if not isinstance(node, Package) or \
4681 node.operation != "merge":
4683 vardb = self._frozen_config.roots[
4684 node.root].trees["vartree"].dbapi
4686 if complete_if_new_use or complete_if_new_ver:
4687 inst_pkg = vardb.match_pkgs(node.slot_atom)
4688 if inst_pkg and inst_pkg[0].cp == node.cp:
4689 inst_pkg = inst_pkg[0]
4690 if complete_if_new_ver and \
4691 (inst_pkg < node or node < inst_pkg):
4692 version_change = True
4695 # Intersect enabled USE with IUSE, in order to
4696 # ignore forced USE from implicit IUSE flags, since
4697 # they're probably irrelevant and they are sensitive
4698 # to use.mask/force changes in the profile.
4699 if complete_if_new_use and \
4700 (node.iuse.all != inst_pkg.iuse.all or
4701 self._pkg_use_enabled(node).intersection(node.iuse.all) !=
4702 self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
# A merge into a slot with no installed counterpart also forces
# complete mode.
4706 if complete_if_new_slot:
4707 cp_list = vardb.match_pkgs(Atom(node.cp))
4708 if (cp_list and cp_list[0].cp == node.cp and
4709 not any(node.slot == pkg.slot for pkg in cp_list)):
4710 version_change = True
4713 if use_change or version_change:
4714 self._dynamic_config.myparams["complete"] = True
4716 if "complete" not in self._dynamic_config.myparams:
4721 # Put the depgraph into a mode that causes it to only
4722 # select packages that have already been added to the
4723 # graph or those that are installed and have not been
4724 # scheduled for replacement. Also, toggle the "deep"
4725 # parameter so that all dependencies are traversed and
4727 self._dynamic_config._complete_mode = True
4728 self._select_atoms = self._select_atoms_from_graph
4729 if "remove" in self._dynamic_config.myparams:
4730 self._select_package = self._select_pkg_from_installed
4732 self._select_package = self._select_pkg_from_graph
4733 self._dynamic_config._traverse_ignored_deps = True
4734 already_deep = self._dynamic_config.myparams.get("deep") is True
4735 if not already_deep:
4736 self._dynamic_config.myparams["deep"] = True
4738 # Invalidate the package selection cache, since
4739 # _select_package has just changed implementations.
4740 for trees in self._dynamic_config._filtered_trees.values():
4741 trees["porttree"].dbapi._clear_cache()
4743 args = self._dynamic_config._initial_arg_list[:]
4744 for root in self._frozen_config.roots:
4745 if root != self._frozen_config.target_root and \
4746 ("remove" in self._dynamic_config.myparams or
4747 self._frozen_config.myopts.get("--root-deps") is not None):
4748 # Only pull in deps for the relevant root.
4750 depgraph_sets = self._dynamic_config.sets[root]
4751 required_set_names = self._frozen_config._required_set_names.copy()
4752 remaining_args = required_set_names.copy()
4753 if required_sets is None or root not in required_sets:
4756 # Removal actions may override sets with temporary
4757 # replacements that have had atoms removed in order
4758 # to implement --deselect behavior.
4759 required_set_names = set(required_sets[root])
4760 depgraph_sets.sets.clear()
4761 depgraph_sets.sets.update(required_sets[root])
4762 if "remove" not in self._dynamic_config.myparams and \
4763 root == self._frozen_config.target_root and \
4765 remaining_args.difference_update(depgraph_sets.sets)
4766 if not remaining_args and \
4767 not self._dynamic_config._ignored_deps and \
4768 not self._dynamic_config._dep_stack:
4770 root_config = self._frozen_config.roots[root]
4771 for s in required_set_names:
4772 pset = depgraph_sets.sets.get(s)
4774 pset = root_config.sets[s]
4775 atom = SETPREFIX + s
4776 args.append(SetArg(arg=atom, pset=pset,
4777 root_config=root_config))
4779 self._set_args(args)
4780 for arg in self._expand_set_args(args, add_to_digraph=True):
4781 for atom in arg.pset.getAtoms():
4782 self._dynamic_config._dep_stack.append(
4783 Dependency(atom=atom, root=arg.root_config.root,
4787 if self._dynamic_config._ignored_deps:
4788 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4789 self._dynamic_config._ignored_deps = []
# First resolution pass; on failure (elided body) presumably aborts.
4790 if not self._create_graph(allow_unsatisfied=True):
4792 # Check the unsatisfied deps to see if any initially satisfied deps
4793 # will become unsatisfied due to an upgrade. Initially unsatisfied
4794 # deps are irrelevant since we only want to avoid breaking deps
4795 # that are initially satisfied.
4796 while self._dynamic_config._unsatisfied_deps:
4797 dep = self._dynamic_config._unsatisfied_deps.pop()
4798 vardb = self._frozen_config.roots[
4799 dep.root].trees["vartree"].dbapi
4800 matches = vardb.match_pkgs(dep.atom)
4802 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4804 # An scheduled installation broke a deep dependency.
4805 # Add the installed package to the graph so that it
4806 # will be appropriately reported as a slot collision
4807 # (possibly solvable via backtracking).
4808 pkg = matches[-1] # highest match
4809 if not self._add_pkg(pkg, dep):
4811 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): line-numbered extract with elided lines (docstring quotes,
# cache-hit returns, try:/except around aux_get, final return); code kept
# byte-identical, comments only added.
#
# _pkg: cache-backed Package factory. Looks the package up in the frozen
# pkg cache (and, for onlydeps, the per-run merge db) before building a new
# Package from the tree dbapi metadata. Raises PackageNotFound when the
# underlying aux_get fails.
4815 def _pkg(self, cpv, type_name, root_config, installed=False,
4816 onlydeps=False, myrepo = None):
4818 Get a package instance from the cache, or create a new
4819 one if necessary. Raises PackageNotFound from aux_get if it
4820 failures for some reason (package does not exist or is
4824 # Ensure that we use the specially optimized RootConfig instance
4825 # that refers to FakeVartree instead of the real vartree.
4826 root_config = self._frozen_config.roots[root_config.root]
4827 pkg = self._frozen_config._pkg_cache.get(
4828 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4829 repo_name=myrepo, root_config=root_config,
4830 installed=installed, onlydeps=onlydeps))
4831 if pkg is None and onlydeps and not installed:
4832 # Maybe it already got pulled in as a "merge" node.
4833 pkg = self._dynamic_config.mydbapi[root_config.root].get(
4834 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4835 repo_name=myrepo, root_config=root_config,
4836 installed=installed, onlydeps=False))
# Cache miss path: fetch metadata from the appropriate tree dbapi.
4839 tree_type = self.pkg_tree_map[type_name]
4840 db = root_config.trees[tree_type].dbapi
4841 db_keys = list(self._frozen_config._trees_orig[root_config.root][
4842 tree_type].dbapi._aux_cache_keys)
4845 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
4847 raise portage.exception.PackageNotFound(cpv)
4849 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
4850 installed=installed, metadata=metadata, onlydeps=onlydeps,
4851 root_config=root_config, type_name=type_name)
4853 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked solely by LICENSE,
# for later user-facing suggestions.
4855 if not self._pkg_visibility_check(pkg) and \
4856 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
4857 slot_key = (pkg.root, pkg.slot_atom)
4858 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
4859 if other_pkg is None or pkg > other_pkg:
4860 self._frozen_config._highest_license_masked[slot_key] = pkg
4864 def _validate_blockers(self):
4865 """Remove any blockers from the digraph that do not match any of the
4866 packages within the graph. If necessary, create hard deps to ensure
4867 correct merge order such that mutually blocking packages are never
4868 installed simultaneously. Also add runtime blockers from all installed
4869 packages if any of them haven't been added already (bug 128809)."""
# NOTE(review): several original source lines are absent from this excerpt
# (the embedded line numbers are non-contiguous); the comments below describe
# only the code that is visible here.
4871 if "--buildpkgonly" in self._frozen_config.myopts or \
4872 "--nodeps" in self._frozen_config.myopts:
4876 # Pull in blockers from all installed packages that haven't already
4877 # been pulled into the depgraph, in order to ensure that they are
4878 # respected (bug 128809). Due to the performance penalty that is
4879 # incurred by all the additional dep_check calls that are required,
4880 # blockers returned from dep_check are cached on disk by the
4881 # BlockerCache class.
4883 # For installed packages, always ignore blockers from DEPEND since
4884 # only runtime dependencies should be relevant for packages that
4885 # are already built.
4886 dep_keys = ["RDEPEND", "PDEPEND"]
4887 for myroot in self._frozen_config.trees:
4889 if self._frozen_config.myopts.get("--root-deps") is not None and \
4890 myroot != self._frozen_config.target_root:
# Per-root handles: installed-package db, settings, and the db that
# represents the final (post-merge) state of this root.
4893 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4894 pkgsettings = self._frozen_config.pkgsettings[myroot]
4895 root_config = self._frozen_config.roots[myroot]
4896 final_db = self._dynamic_config.mydbapi[myroot]
# On-disk blocker cache; entries not touched in this pass are
# considered stale and are deleted before flush() below.
4898 blocker_cache = BlockerCache(myroot, vardb)
4899 stale_cache = set(blocker_cache)
4902 stale_cache.discard(cpv)
4903 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4905 pkg in self._dynamic_config._traversed_pkg_deps
4907 # Check for masked installed packages. Only warn about
4908 # packages that are in the graph in order to avoid warning
4909 # about those that will be automatically uninstalled during
4910 # the merge process or by --depclean. Always warn about
4911 # packages masked by license, since the user likely wants
4912 # to adjust ACCEPT_LICENSE.
4914 if not self._pkg_visibility_check(pkg,
4915 trust_graph=False) and \
4916 (pkg_in_graph or 'LICENSE' in pkg.masks):
4917 self._dynamic_config._masked_installed.add(pkg)
4919 self._check_masks(pkg)
4921 blocker_atoms = None
4927 self._dynamic_config._blocker_parents.child_nodes(pkg))
4932 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4936 # Select just the runtime blockers.
4937 blockers = [blocker for blocker in blockers \
4938 if blocker.priority.runtime or \
4939 blocker.priority.runtime_post]
4940 if blockers is not None:
4941 blockers = set(blocker.atom for blocker in blockers)
4943 # If this node has any blockers, create a "nomerge"
4944 # node for it so that they can be enforced.
4945 self._spinner_update()
4946 blocker_data = blocker_cache.get(cpv)
# Cached blocker data is only valid while the vdb COUNTER of the
# installed package is unchanged (i.e. it was not re-merged).
# NOTE(review): long() is Python 2 only; this code predates Python 3.
4947 if blocker_data is not None and \
4948 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4951 # If blocker data from the graph is available, use
4952 # it to validate the cache and update the cache if
4954 if blocker_data is not None and \
4955 blockers is not None:
4956 if not blockers.symmetric_difference(
4957 blocker_data.atoms):
4961 if blocker_data is None and \
4962 blockers is not None:
4963 # Re-use the blockers from the graph.
4964 blocker_atoms = sorted(blockers)
4965 counter = long(pkg.metadata["COUNTER"])
4967 blocker_cache.BlockerData(counter, blocker_atoms)
4968 blocker_cache[pkg.cpv] = blocker_data
4972 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
4974 # Use aux_get() to trigger FakeVartree global
4975 # updates on *DEPEND when appropriate.
4976 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4977 # It is crucial to pass in final_db here in order to
4978 # optimize dep_check calls by eliminating atoms via
4979 # dep_wordreduce and dep_eval calls.
4981 success, atoms = portage.dep_check(depstr,
4982 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4983 trees=self._dynamic_config._graph_trees, myroot=myroot)
4986 except Exception as e:
4987 # This is helpful, for example, if a ValueError
4988 # is thrown from cpv_expand due to multiple
4989 # matches (this can happen if an atom lacks a
4991 show_invalid_depstring_notice(
4992 pkg, depstr, _unicode_decode("%s") % (e,))
4996 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4997 if replacement_pkg and \
4998 replacement_pkg[0].operation == "merge":
4999 # This package is being replaced anyway, so
5000 # ignore invalid dependencies so as not to
5001 # annoy the user too much (otherwise they'd be
5002 # forced to manually unmerge it first).
5004 show_invalid_depstring_notice(pkg, depstr, atoms)
5006 blocker_atoms = [myatom for myatom in atoms \
5008 blocker_atoms.sort()
5009 counter = long(pkg.metadata["COUNTER"])
5010 blocker_cache[cpv] = \
5011 blocker_cache.BlockerData(counter, blocker_atoms)
# Register each discovered blocker atom against this package in the
# blocker-parents digraph, for resolution in the second pass below.
5014 for atom in blocker_atoms:
5015 blocker = Blocker(atom=atom,
5016 eapi=pkg.metadata["EAPI"],
5017 priority=self._priority(runtime=True),
5019 self._dynamic_config._blocker_parents.add(blocker, pkg)
5020 except portage.exception.InvalidAtom as e:
5021 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5022 show_invalid_depstring_notice(
5024 _unicode_decode("Invalid Atom: %s") % (e,))
# Drop cache entries for packages that no longer exist, then persist.
5026 for cpv in stale_cache:
5027 del blocker_cache[cpv]
5028 blocker_cache.flush()
5031 # Discard any "uninstall" tasks scheduled by previous calls
5032 # to this method, since those tasks may not make sense given
5033 # the current graph state.
5034 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
5035 if previous_uninstall_tasks:
5036 self._dynamic_config._blocker_uninstalls = digraph()
5037 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second pass: match every collected blocker against both the initial
# (installed) db and the final (post-merge) db, and classify each
# blocker as irrelevant, resolvable via merge order, or unsolvable.
5039 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
5040 self._spinner_update()
5041 root_config = self._frozen_config.roots[blocker.root]
5042 virtuals = root_config.settings.getvirtuals()
5043 myroot = blocker.root
5044 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
5045 final_db = self._dynamic_config.mydbapi[myroot]
5047 provider_virtual = False
5048 if blocker.cp in virtuals and \
5049 not self._have_new_virt(blocker.root, blocker.cp):
5050 provider_virtual = True
5052 # Use this to check PROVIDE for each matched package
5054 atom_set = InternalPackageSet(
5055 initial_atoms=[blocker.atom])
5057 if provider_virtual:
# Expand an old-style virtual into one atom per provider package.
5059 for provider_entry in virtuals[blocker.cp]:
5060 atoms.append(Atom(blocker.atom.replace(
5061 blocker.cp, provider_entry.cp, 1)))
5063 atoms = [blocker.atom]
5065 blocked_initial = set()
5067 for pkg in initial_db.match_pkgs(atom):
5068 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5069 blocked_initial.add(pkg)
5071 blocked_final = set()
5073 for pkg in final_db.match_pkgs(atom):
5074 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
5075 blocked_final.add(pkg)
# Nothing matches in either state: the blocker is irrelevant.
5077 if not blocked_initial and not blocked_final:
5078 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
5079 self._dynamic_config._blocker_parents.remove(blocker)
5080 # Discard any parents that don't have any more blockers.
5081 for pkg in parent_pkgs:
5082 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
5083 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
5084 self._dynamic_config._blocker_parents.remove(pkg)
5086 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
5087 unresolved_blocks = False
5088 depends_on_order = set()
5089 for pkg in blocked_initial:
5090 if pkg.slot_atom == parent.slot_atom and \
5091 not blocker.atom.blocker.overlap.forbid:
5092 # New !!atom blockers do not allow temporary
5093 # simultaneous installation, so unlike !atom
5094 # blockers, !!atom blockers aren't ignored
5095 # when they match other packages occupying
5098 if parent.installed:
5099 # Two currently installed packages conflict with
5100 # each other. Ignore this case since the damage
5101 # is already done and this would be likely to
5102 # confuse users if displayed like a normal blocker.
5105 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5107 if parent.operation == "merge":
5108 # Maybe the blocked package can be replaced or simply
5109 # unmerged to resolve this block.
5110 depends_on_order.add((pkg, parent))
5112 # None of the above blocker resolution techniques apply,
5113 # so apparently this one is unresolvable.
5114 unresolved_blocks = True
5115 for pkg in blocked_final:
5116 if pkg.slot_atom == parent.slot_atom and \
5117 not blocker.atom.blocker.overlap.forbid:
5118 # New !!atom blockers do not allow temporary
5119 # simultaneous installation, so unlike !atom
5120 # blockers, !!atom blockers aren't ignored
5121 # when they match other packages occupying
5124 if parent.operation == "nomerge" and \
5125 pkg.operation == "nomerge":
5126 # This blocker will be handled the next time that a
5127 # merge of either package is triggered.
5130 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
5132 # Maybe the blocking package can be
5133 # unmerged to resolve this block.
5134 if parent.operation == "merge" and pkg.installed:
5135 depends_on_order.add((pkg, parent))
5137 elif parent.operation == "nomerge":
5138 depends_on_order.add((parent, pkg))
5140 # None of the above blocker resolution techniques apply,
5141 # so apparently this one is unresolvable.
5142 unresolved_blocks = True
5144 # Make sure we don't unmerge any package that has been pulled
5146 if not unresolved_blocks and depends_on_order:
5147 for inst_pkg, inst_task in depends_on_order:
5148 if self._dynamic_config.digraph.contains(inst_pkg) and \
5149 self._dynamic_config.digraph.parent_nodes(inst_pkg):
5150 unresolved_blocks = True
5153 if not unresolved_blocks and depends_on_order:
5154 for inst_pkg, inst_task in depends_on_order:
# Create an explicit uninstall task for the blocked installed
# package; merge order will force it after its blockers.
5155 uninst_task = Package(built=inst_pkg.built,
5156 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5157 metadata=inst_pkg.metadata,
5158 operation="uninstall",
5159 root_config=inst_pkg.root_config,
5160 type_name=inst_pkg.type_name)
5161 # Enforce correct merge order with a hard dep.
5162 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
5163 priority=BlockerDepPriority.instance)
5164 # Count references to this blocker so that it can be
5165 # invalidated after nodes referencing it have been
5167 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
5168 if not unresolved_blocks and not depends_on_order:
5169 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
5170 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
5171 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
5172 self._dynamic_config._blocker_parents.remove(blocker)
5173 if not self._dynamic_config._blocker_parents.child_nodes(parent):
5174 self._dynamic_config._blocker_parents.remove(parent)
5175 if unresolved_blocks:
5176 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts are tolerable for this run.
# Presumably returns True when any of the listed options is set (those modes
# never actually merge into the live filesystem) — the return statements fall
# outside this excerpt, so verify against the full source.
5180 def _accept_blocker_conflicts(self):
5182 for x in ("--buildpkgonly", "--fetchonly",
5183 "--fetch-all-uri", "--nodeps"):
5184 if x in self._frozen_config.myopts:
5189 def _merge_order_bias(self, mygraph):
5191 For optimal leaf node selection, promote deep system runtime deps and
5192 order nodes from highest to lowest overall reference count.
# Precompute each node's parent (reference) count once, so the comparator
# below does constant-time lookups instead of re-querying the graph.
5196 for node in mygraph.order:
5197 node_info[node] = len(mygraph.parent_nodes(node))
5198 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Old-style cmp() comparator; adapted to a key function via cmp_sort_key.
# Visible ordering rules: uninstall tasks are grouped relative to each
# other, deep system runtime deps are promoted, and otherwise nodes sort
# by descending reference count (node_info[node2] - node_info[node1]).
5200 def cmp_merge_preference(node1, node2):
5202 if node1.operation == 'uninstall':
5203 if node2.operation == 'uninstall':
5207 if node2.operation == 'uninstall':
5208 if node1.operation == 'uninstall':
5212 node1_sys = node1 in deep_system_deps
5213 node2_sys = node2 in deep_system_deps
5214 if node1_sys != node2_sys:
5219 return node_info[node2] - node_info[node1]
5221 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Serialize the dependency graph into an ordered merge list, resolving
# conflicts and retrying serialization until it succeeds; the result is
# cached in _dynamic_config._serialized_tasks_cache and a copy is returned.
# NOTE(review): the parameter name 'reversed' shadows the builtin; it cannot
# be renamed here without breaking keyword callers — consider deprecating it.
5223 def altlist(self, reversed=False):
5225 while self._dynamic_config._serialized_tasks_cache is None:
5226 self._resolve_conflicts()
5228 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
5229 self._serialize_tasks()
# _serialize_tasks_retry signals that the graph changed; loop again.
5230 except self._serialize_tasks_retry:
# Return a shallow copy so callers cannot mutate the cache.
5233 retlist = self._dynamic_config._serialized_tasks_cache[:]
5238 def _implicit_libc_deps(self, mergelist, graph):
5240 Create implicit dependencies on libc, in order to ensure that libc
5241 is installed as early as possible (see bug #303567).
# Collect libc packages (per root) that are scheduled for merge and not
# yet present in the installed-package db.
5244 implicit_libc_roots = (self._frozen_config._running_root.root,)
5245 for root in implicit_libc_roots:
5246 graphdb = self._dynamic_config.mydbapi[root]
5247 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5248 for atom in self._expand_virt_from_graph(root,
5249 portage.const.LIBC_PACKAGE_ATOM):
5252 match = graphdb.match_pkgs(atom)
5256 if pkg.operation == "merge" and \
5257 not vardb.cpv_exists(pkg.cpv):
5258 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; every merge that comes after a libc
# merge on the same root gets a hard buildtime dep on that libc, so
# libc lands first in the schedule.
5263 earlier_libc_pkgs = set()
5265 for pkg in mergelist:
5266 if not isinstance(pkg, Package):
5267 # a satisfied blocker
5269 root_libc_pkgs = libc_pkgs.get(pkg.root)
5270 if root_libc_pkgs is not None and \
5271 pkg.operation == "merge":
5272 if pkg in root_libc_pkgs:
5273 earlier_libc_pkgs.add(pkg)
5275 for libc_pkg in root_libc_pkgs:
5276 if libc_pkg in earlier_libc_pkgs:
5277 graph.add(libc_pkg, pkg,
5278 priority=DepPriority(buildtime=True))
5280 def schedulerGraph(self):
5282 The scheduler graph is identical to the normal one except that
5283 uninstall edges are reversed in specific cases that require
5284 conflicting packages to be temporarily installed simultaneously.
5285 This is intended for use by the Scheduler in its parallelization
5286 logic. It ensures that temporary simultaneous installation of
5287 conflicting packages is avoided when appropriate (especially for
5288 !!atom blockers), but allowed in specific cases that require it.
5290 Note that this method calls break_refs() which alters the state of
5291 internal Package instances such that this depgraph instance should
5292 not be used to perform any more calculations.
5295 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
5296 mergelist = self.altlist()
5297 self._implicit_libc_deps(mergelist,
5298 self._dynamic_config._scheduler_graph)
5300 # Break DepPriority.satisfied attributes which reference
5301 # installed Package instances.
5302 for parents, children, node in \
5303 self._dynamic_config._scheduler_graph.nodes.values():
5304 for priorities in chain(parents.values(), children.values()):
5305 for priority in priorities:
5306 if priority.satisfied:
# Intentional: replace the truthy Package reference stored in
# 'satisfied' with the plain boolean True, so the graph no
# longer keeps that Package object alive. Not a no-op.
5307 priority.satisfied = True
# Prune the package cache down to packages that are either in the graph
# or still installed, so discarded Package instances can be collected.
5309 pkg_cache = self._frozen_config._pkg_cache
5310 graph = self._dynamic_config._scheduler_graph
5311 trees = self._frozen_config.trees
5312 pruned_pkg_cache = {}
5313 for key, pkg in pkg_cache.items():
5314 if pkg in graph or \
5315 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
5316 pruned_pkg_cache[key] = pkg
5319 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
5323 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
5327 def break_refs(self):
5329 Break any references in Package instances that lead back to the depgraph.
5330 This is useful if you want to hold references to packages without also
5331 holding the depgraph on the heap. It should only be called after the
5332 depgraph and _frozen_config will not be used for any more calculations.
# Sync each specially-optimized RootConfig with the original one from
# _trees_orig, then store it back so only a single instance survives.
5334 for root_config in self._frozen_config.roots.values():
5335 root_config.update(self._frozen_config._trees_orig[
5336 root_config.root]["root_config"])
5337 # Both instances are now identical, so discard the
5338 # original which should have no other references.
5339 self._frozen_config._trees_orig[
5340 root_config.root]["root_config"] = root_config
# Resolve slot/blocker conflicts prior to task serialization. Raises
# _unknown_internal_error when the graph cannot be completed or when
# blocker validation fails.
5342 def _resolve_conflicts(self):
# When backtracking is allowed and slot collisions exist, switch on
# "complete" graph mode so the whole installed set is considered.
5344 if "complete" not in self._dynamic_config.myparams and \
5345 self._dynamic_config._allow_backtracking and \
5346 self._dynamic_config._slot_collision_nodes and \
5347 not self._accept_blocker_conflicts():
5348 self._dynamic_config.myparams["complete"] = True
5350 if not self._complete_graph():
5351 raise self._unknown_internal_error()
5353 self._process_slot_conflicts()
5355 self._slot_operator_trigger_reinstalls()
# A failed blocker validation is not retried via backtracking.
5357 if not self._validate_blockers():
5358 self._dynamic_config._skip_restart = True
5359 raise self._unknown_internal_error()
5361 def _serialize_tasks(self):
5363 debug = "--debug" in self._frozen_config.myopts
5366 writemsg("\ndigraph:\n\n", noiselevel=-1)
5367 self._dynamic_config.digraph.debug_print()
5368 writemsg("\n", noiselevel=-1)
5370 scheduler_graph = self._dynamic_config.digraph.copy()
5372 if '--nodeps' in self._frozen_config.myopts:
5373 # Preserve the package order given on the command line.
5374 return ([node for node in scheduler_graph \
5375 if isinstance(node, Package) \
5376 and node.operation == 'merge'], scheduler_graph)
5378 mygraph=self._dynamic_config.digraph.copy()
5380 removed_nodes = set()
5382 # Prune off all DependencyArg instances since they aren't
5383 # needed, and because of nested sets this is faster than doing
5384 # it with multiple digraph.root_nodes() calls below. This also
5385 # takes care of nested sets that have circular references,
5386 # which wouldn't be matched by digraph.root_nodes().
5387 for node in mygraph:
5388 if isinstance(node, DependencyArg):
5389 removed_nodes.add(node)
5391 mygraph.difference_update(removed_nodes)
5392 removed_nodes.clear()
5394 # Prune "nomerge" root nodes if nothing depends on them, since
5395 # otherwise they slow down merge order calculation. Don't remove
5396 # non-root nodes since they help optimize merge order in some cases
5397 # such as revdep-rebuild.
5400 for node in mygraph.root_nodes():
5401 if not isinstance(node, Package) or \
5402 node.installed or node.onlydeps:
5403 removed_nodes.add(node)
5405 self._spinner_update()
5406 mygraph.difference_update(removed_nodes)
5407 if not removed_nodes:
5409 removed_nodes.clear()
5410 self._merge_order_bias(mygraph)
5411 def cmp_circular_bias(n1, n2):
5413 RDEPEND is stronger than PDEPEND and this function
5414 measures such a strength bias within a circular
5415 dependency relationship.
5417 n1_n2_medium = n2 in mygraph.child_nodes(n1,
5418 ignore_priority=priority_range.ignore_medium_soft)
5419 n2_n1_medium = n1 in mygraph.child_nodes(n2,
5420 ignore_priority=priority_range.ignore_medium_soft)
5421 if n1_n2_medium == n2_n1_medium:
5426 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
5428 # Contains uninstall tasks that have been scheduled to
5429 # occur after overlapping blockers have been installed.
5430 scheduled_uninstalls = set()
5431 # Contains any Uninstall tasks that have been ignored
5432 # in order to avoid the circular deps code path. These
5433 # correspond to blocker conflicts that could not be
5435 ignored_uninstall_tasks = set()
5436 have_uninstall_task = False
5437 complete = "complete" in self._dynamic_config.myparams
5440 def get_nodes(**kwargs):
5442 Returns leaf nodes excluding Uninstall instances
5443 since those should be executed as late as possible.
5445 return [node for node in mygraph.leaf_nodes(**kwargs) \
5446 if isinstance(node, Package) and \
5447 (node.operation != "uninstall" or \
5448 node in scheduled_uninstalls)]
5450 # sys-apps/portage needs special treatment if ROOT="/"
5451 running_root = self._frozen_config._running_root.root
5452 runtime_deps = InternalPackageSet(
5453 initial_atoms=[PORTAGE_PACKAGE_ATOM])
5454 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
5455 PORTAGE_PACKAGE_ATOM)
5456 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
5457 PORTAGE_PACKAGE_ATOM)
5460 running_portage = running_portage[0]
5462 running_portage = None
5464 if replacement_portage:
5465 replacement_portage = replacement_portage[0]
5467 replacement_portage = None
5469 if replacement_portage == running_portage:
5470 replacement_portage = None
5472 if running_portage is not None:
5474 portage_rdepend = self._select_atoms_highest_available(
5475 running_root, running_portage.metadata["RDEPEND"],
5476 myuse=self._pkg_use_enabled(running_portage),
5477 parent=running_portage, strict=False)
5478 except portage.exception.InvalidDependString as e:
5479 portage.writemsg("!!! Invalid RDEPEND in " + \
5480 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
5481 (running_root, running_portage.cpv, e), noiselevel=-1)
5483 portage_rdepend = {running_portage : []}
5484 for atoms in portage_rdepend.values():
5485 runtime_deps.update(atom for atom in atoms \
5486 if not atom.blocker)
5488 # Merge libc asap, in order to account for implicit
5489 # dependencies. See bug #303567.
5490 implicit_libc_roots = (running_root,)
5491 for root in implicit_libc_roots:
5493 vardb = self._frozen_config.trees[root]["vartree"].dbapi
5494 graphdb = self._dynamic_config.mydbapi[root]
5495 for atom in self._expand_virt_from_graph(root,
5496 portage.const.LIBC_PACKAGE_ATOM):
5499 match = graphdb.match_pkgs(atom)
5503 if pkg.operation == "merge" and \
5504 not vardb.cpv_exists(pkg.cpv):
5508 # If there's also an os-headers upgrade, we need to
5509 # pull that in first. See bug #328317.
5510 for atom in self._expand_virt_from_graph(root,
5511 portage.const.OS_HEADERS_PACKAGE_ATOM):
5514 match = graphdb.match_pkgs(atom)
5518 if pkg.operation == "merge" and \
5519 not vardb.cpv_exists(pkg.cpv):
5520 asap_nodes.append(pkg)
5522 asap_nodes.extend(libc_pkgs)
5524 def gather_deps(ignore_priority, mergeable_nodes,
5525 selected_nodes, node):
5527 Recursively gather a group of nodes that RDEPEND on
5528 eachother. This ensures that they are merged as a group
5529 and get their RDEPENDs satisfied as soon as possible.
5531 if node in selected_nodes:
5533 if node not in mergeable_nodes:
5535 if node == replacement_portage and \
5536 mygraph.child_nodes(node,
5537 ignore_priority=priority_range.ignore_medium_soft):
5538 # Make sure that portage always has all of it's
5539 # RDEPENDs installed first.
5541 selected_nodes.add(node)
5542 for child in mygraph.child_nodes(node,
5543 ignore_priority=ignore_priority):
5544 if not gather_deps(ignore_priority,
5545 mergeable_nodes, selected_nodes, child):
5549 def ignore_uninst_or_med(priority):
5550 if priority is BlockerDepPriority.instance:
5552 return priority_range.ignore_medium(priority)
5554 def ignore_uninst_or_med_soft(priority):
5555 if priority is BlockerDepPriority.instance:
5557 return priority_range.ignore_medium_soft(priority)
5559 tree_mode = "--tree" in self._frozen_config.myopts
5560 # Tracks whether or not the current iteration should prefer asap_nodes
5561 # if available. This is set to False when the previous iteration
5562 # failed to select any nodes. It is reset whenever nodes are
5563 # successfully selected.
5566 # Controls whether or not the current iteration should drop edges that
5567 # are "satisfied" by installed packages, in order to solve circular
5568 # dependencies. The deep runtime dependencies of installed packages are
5569 # not checked in this case (bug #199856), so it must be avoided
5570 # whenever possible.
5571 drop_satisfied = False
5573 # State of variables for successive iterations that loosen the
5574 # criteria for node selection.
5576 # iteration prefer_asap drop_satisfied
5581 # If no nodes are selected on the last iteration, it is due to
5582 # unresolved blockers or circular dependencies.
5585 self._spinner_update()
5586 selected_nodes = None
5587 ignore_priority = None
5588 if drop_satisfied or (prefer_asap and asap_nodes):
5589 priority_range = DepPrioritySatisfiedRange
5591 priority_range = DepPriorityNormalRange
5592 if prefer_asap and asap_nodes:
5593 # ASAP nodes are merged before their soft deps. Go ahead and
5594 # select root nodes here if necessary, since it's typical for
5595 # the parent to have been removed from the graph already.
5596 asap_nodes = [node for node in asap_nodes \
5597 if mygraph.contains(node)]
5598 for i in range(priority_range.SOFT,
5599 priority_range.MEDIUM_SOFT + 1):
5600 ignore_priority = priority_range.ignore_priority[i]
5601 for node in asap_nodes:
5602 if not mygraph.child_nodes(node,
5603 ignore_priority=ignore_priority):
5604 selected_nodes = [node]
5605 asap_nodes.remove(node)
5610 if not selected_nodes and \
5611 not (prefer_asap and asap_nodes):
5612 for i in range(priority_range.NONE,
5613 priority_range.MEDIUM_SOFT + 1):
5614 ignore_priority = priority_range.ignore_priority[i]
5615 nodes = get_nodes(ignore_priority=ignore_priority)
5617 # If there is a mixture of merges and uninstalls,
5618 # do the uninstalls first.
5619 good_uninstalls = None
5621 good_uninstalls = []
5623 if node.operation == "uninstall":
5624 good_uninstalls.append(node)
5627 nodes = good_uninstalls
5631 if good_uninstalls or len(nodes) == 1 or \
5632 (ignore_priority is None and \
5633 not asap_nodes and not tree_mode):
5634 # Greedily pop all of these nodes since no
5635 # relationship has been ignored. This optimization
5636 # destroys --tree output, so it's disabled in tree
5638 selected_nodes = nodes
5640 # For optimal merge order:
5641 # * Only pop one node.
5642 # * Removing a root node (node without a parent)
5643 # will not produce a leaf node, so avoid it.
5644 # * It's normal for a selected uninstall to be a
5645 # root node, so don't check them for parents.
5647 prefer_asap_parents = (True, False)
5649 prefer_asap_parents = (False,)
5650 for check_asap_parent in prefer_asap_parents:
5651 if check_asap_parent:
5653 parents = mygraph.parent_nodes(node,
5654 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
5655 if any(x in asap_nodes for x in parents):
5656 selected_nodes = [node]
5660 if mygraph.parent_nodes(node):
5661 selected_nodes = [node]
5668 if not selected_nodes:
5669 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
5671 mergeable_nodes = set(nodes)
5672 if prefer_asap and asap_nodes:
5674 # When gathering the nodes belonging to a runtime cycle,
5675 # we want to minimize the number of nodes gathered, since
5676 # this tends to produce a more optimal merge order.
5677 # Ignoring all medium_soft deps serves this purpose.
5678 # In the case of multiple runtime cycles, where some cycles
5679 # may depend on smaller independent cycles, it's optimal
5680 # to merge smaller independent cycles before other cycles
5681 # that depend on them. Therefore, we search for the
5682 # smallest cycle in order to try and identify and prefer
5683 # these smaller independent cycles.
5684 ignore_priority = priority_range.ignore_medium_soft
5685 smallest_cycle = None
5687 if not mygraph.parent_nodes(node):
5689 selected_nodes = set()
5690 if gather_deps(ignore_priority,
5691 mergeable_nodes, selected_nodes, node):
5692 # When selecting asap_nodes, we need to ensure
5693 # that we haven't selected a large runtime cycle
5694 # that is obviously sub-optimal. This will be
5695 # obvious if any of the non-asap selected_nodes
5696 # is a leaf node when medium_soft deps are
5698 if prefer_asap and asap_nodes and \
5699 len(selected_nodes) > 1:
5700 for node in selected_nodes.difference(
5702 if not mygraph.child_nodes(node,
5704 DepPriorityNormalRange.ignore_medium_soft):
5705 selected_nodes = None
5708 if smallest_cycle is None or \
5709 len(selected_nodes) < len(smallest_cycle):
5710 smallest_cycle = selected_nodes
5712 selected_nodes = smallest_cycle
5714 if selected_nodes and debug:
5715 writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
5716 (len(selected_nodes),), noiselevel=-1)
5717 cycle_digraph = mygraph.copy()
5718 cycle_digraph.difference_update([x for x in
5719 cycle_digraph if x not in selected_nodes])
5720 cycle_digraph.debug_print()
5721 writemsg("\n", noiselevel=-1)
5723 if prefer_asap and asap_nodes and not selected_nodes:
5724 # We failed to find any asap nodes to merge, so ignore
5725 # them for the next iteration.
5729 if selected_nodes and ignore_priority is not None:
5730 # Try to merge ignored medium_soft deps as soon as possible
5731 # if they're not satisfied by installed packages.
5732 for node in selected_nodes:
5733 children = set(mygraph.child_nodes(node))
5734 soft = children.difference(
5735 mygraph.child_nodes(node,
5736 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
5737 medium_soft = children.difference(
5738 mygraph.child_nodes(node,
5740 DepPrioritySatisfiedRange.ignore_medium_soft))
5741 medium_soft.difference_update(soft)
5742 for child in medium_soft:
5743 if child in selected_nodes:
5745 if child in asap_nodes:
5747 # Merge PDEPEND asap for bug #180045.
5748 asap_nodes.append(child)
5750 if selected_nodes and len(selected_nodes) > 1:
5751 if not isinstance(selected_nodes, list):
5752 selected_nodes = list(selected_nodes)
5753 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
5755 if not selected_nodes and myblocker_uninstalls:
5756 # An Uninstall task needs to be executed in order to
5757 # avoid conflict if possible.
5760 priority_range = DepPrioritySatisfiedRange
5762 priority_range = DepPriorityNormalRange
5764 mergeable_nodes = get_nodes(
5765 ignore_priority=ignore_uninst_or_med)
5767 min_parent_deps = None
5770 for task in myblocker_uninstalls.leaf_nodes():
5771 # Do some sanity checks so that system or world packages
5772 # don't get uninstalled inappropriately here (only really
5773 # necessary when --complete-graph has not been enabled).
5775 if task in ignored_uninstall_tasks:
5778 if task in scheduled_uninstalls:
5779 # It's been scheduled but it hasn't
5780 # been executed yet due to dependence
5781 # on installation of blocking packages.
5784 root_config = self._frozen_config.roots[task.root]
5785 inst_pkg = self._pkg(task.cpv, "installed", root_config,
5788 if self._dynamic_config.digraph.contains(inst_pkg):
5791 forbid_overlap = False
5792 heuristic_overlap = False
5793 for blocker in myblocker_uninstalls.parent_nodes(task):
5794 if not eapi_has_strong_blocks(blocker.eapi):
5795 heuristic_overlap = True
5796 elif blocker.atom.blocker.overlap.forbid:
5797 forbid_overlap = True
5799 if forbid_overlap and running_root == task.root:
5802 if heuristic_overlap and running_root == task.root:
5803 # Never uninstall sys-apps/portage or it's essential
5804 # dependencies, except through replacement.
5806 runtime_dep_atoms = \
5807 list(runtime_deps.iterAtomsForPackage(task))
5808 except portage.exception.InvalidDependString as e:
5809 portage.writemsg("!!! Invalid PROVIDE in " + \
5810 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5811 (task.root, task.cpv, e), noiselevel=-1)
5815 # Don't uninstall a runtime dep if it appears
5816 # to be the only suitable one installed.
5818 vardb = root_config.trees["vartree"].dbapi
5819 for atom in runtime_dep_atoms:
5820 other_version = None
5821 for pkg in vardb.match_pkgs(atom):
5822 if pkg.cpv == task.cpv and \
5823 pkg.metadata["COUNTER"] == \
5824 task.metadata["COUNTER"]:
5828 if other_version is None:
5834 # For packages in the system set, don't take
5835 # any chances. If the conflict can't be resolved
5836 # by a normal replacement operation then abort.
5839 for atom in root_config.sets[
5840 "system"].iterAtomsForPackage(task):
5843 except portage.exception.InvalidDependString as e:
5844 portage.writemsg("!!! Invalid PROVIDE in " + \
5845 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5846 (task.root, task.cpv, e), noiselevel=-1)
5852 # Note that the world check isn't always
5853 # necessary since self._complete_graph() will
5854 # add all packages from the system and world sets to the
5855 # graph. This just allows unresolved conflicts to be
5856 # detected as early as possible, which makes it possible
5857 # to avoid calling self._complete_graph() when it is
5858 # unnecessary due to blockers triggering an abortion.
5860 # For packages in the world set, go ahead an uninstall
5861 # when necessary, as long as the atom will be satisfied
5862 # in the final state.
5863 graph_db = self._dynamic_config.mydbapi[task.root]
5866 for atom in root_config.sets[
5867 "selected"].iterAtomsForPackage(task):
5869 for pkg in graph_db.match_pkgs(atom):
5876 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
5878 except portage.exception.InvalidDependString as e:
5879 portage.writemsg("!!! Invalid PROVIDE in " + \
5880 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5881 (task.root, task.cpv, e), noiselevel=-1)
5887 # Check the deps of parent nodes to ensure that
5888 # the chosen task produces a leaf node. Maybe
5889 # this can be optimized some more to make the
5890 # best possible choice, but the current algorithm
5891 # is simple and should be near optimal for most
5893 self._spinner_update()
5894 mergeable_parent = False
5896 parent_deps.add(task)
5897 for parent in mygraph.parent_nodes(task):
5898 parent_deps.update(mygraph.child_nodes(parent,
5899 ignore_priority=priority_range.ignore_medium_soft))
5900 if min_parent_deps is not None and \
5901 len(parent_deps) >= min_parent_deps:
5902 # This task is no better than a previously selected
5903 # task, so abort search now in order to avoid wasting
5904 # any more cpu time on this task. This increases
5905 # performance dramatically in cases when there are
5906 # hundreds of blockers to solve, like when
5907 # upgrading to a new slot of kde-meta.
5908 mergeable_parent = None
5910 if parent in mergeable_nodes and \
5911 gather_deps(ignore_uninst_or_med_soft,
5912 mergeable_nodes, set(), parent):
5913 mergeable_parent = True
5915 if not mergeable_parent:
5918 if min_parent_deps is None or \
5919 len(parent_deps) < min_parent_deps:
5920 min_parent_deps = len(parent_deps)
5923 if uninst_task is not None and min_parent_deps == 1:
5924 # This is the best possible result, so so abort search
5925 # now in order to avoid wasting any more cpu time.
5928 if uninst_task is not None:
5929 # The uninstall is performed only after blocking
5930 # packages have been merged on top of it. File
5931 # collisions between blocking packages are detected
5932 # and removed from the list of files to be uninstalled.
5933 scheduled_uninstalls.add(uninst_task)
5934 parent_nodes = mygraph.parent_nodes(uninst_task)
5936 # Reverse the parent -> uninstall edges since we want
5937 # to do the uninstall after blocking packages have
5938 # been merged on top of it.
5939 mygraph.remove(uninst_task)
5940 for blocked_pkg in parent_nodes:
5941 mygraph.add(blocked_pkg, uninst_task,
5942 priority=BlockerDepPriority.instance)
5943 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5944 scheduler_graph.add(blocked_pkg, uninst_task,
5945 priority=BlockerDepPriority.instance)
5947 # Sometimes a merge node will render an uninstall
5948 # node unnecessary (due to occupying the same SLOT),
5949 # and we want to avoid executing a separate uninstall
5950 # task in that case.
5951 slot_node = self._dynamic_config.mydbapi[uninst_task.root
5952 ].match_pkgs(uninst_task.slot_atom)
5954 slot_node[0].operation == "merge":
5955 mygraph.add(slot_node[0], uninst_task,
5956 priority=BlockerDepPriority.instance)
5958 # Reset the state variables for leaf node selection and
5959 # continue trying to select leaf nodes.
5961 drop_satisfied = False
5964 if not selected_nodes:
5965 # Only select root nodes as a last resort. This case should
5966 # only trigger when the graph is nearly empty and the only
5967 # remaining nodes are isolated (no parents or children). Since
5968 # the nodes must be isolated, ignore_priority is not needed.
5969 selected_nodes = get_nodes()
5971 if not selected_nodes and not drop_satisfied:
5972 drop_satisfied = True
5975 if not selected_nodes and myblocker_uninstalls:
5976 # If possible, drop an uninstall task here in order to avoid
5977 # the circular deps code path. The corresponding blocker will
5978 # still be counted as an unresolved conflict.
5980 for node in myblocker_uninstalls.leaf_nodes():
5982 mygraph.remove(node)
5987 ignored_uninstall_tasks.add(node)
5990 if uninst_task is not None:
5991 # Reset the state variables for leaf node selection and
5992 # continue trying to select leaf nodes.
5994 drop_satisfied = False
5997 if not selected_nodes:
5998 self._dynamic_config._circular_deps_for_display = mygraph
5999 self._dynamic_config._skip_restart = True
6000 raise self._unknown_internal_error()
6002 # At this point, we've succeeded in selecting one or more nodes, so
6003 # reset state variables for leaf node selection.
6005 drop_satisfied = False
6007 mygraph.difference_update(selected_nodes)
6009 for node in selected_nodes:
6010 if isinstance(node, Package) and \
6011 node.operation == "nomerge":
6014 # Handle interactions between blockers
6015 # and uninstallation tasks.
6016 solved_blockers = set()
6018 if isinstance(node, Package) and \
6019 "uninstall" == node.operation:
6020 have_uninstall_task = True
6023 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
6024 inst_pkg = vardb.match_pkgs(node.slot_atom)
6026 # The package will be replaced by this one, so remove
6027 # the corresponding Uninstall task if necessary.
6028 inst_pkg = inst_pkg[0]
6029 uninst_task = Package(built=inst_pkg.built,
6030 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6031 metadata=inst_pkg.metadata,
6032 operation="uninstall",
6033 root_config=inst_pkg.root_config,
6034 type_name=inst_pkg.type_name)
6036 mygraph.remove(uninst_task)
6040 if uninst_task is not None and \
6041 uninst_task not in ignored_uninstall_tasks and \
6042 myblocker_uninstalls.contains(uninst_task):
6043 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6044 myblocker_uninstalls.remove(uninst_task)
6045 # Discard any blockers that this Uninstall solves.
6046 for blocker in blocker_nodes:
6047 if not myblocker_uninstalls.child_nodes(blocker):
6048 myblocker_uninstalls.remove(blocker)
6050 self._dynamic_config._unsolvable_blockers:
6051 solved_blockers.add(blocker)
6053 retlist.append(node)
6055 if (isinstance(node, Package) and \
6056 "uninstall" == node.operation) or \
6057 (uninst_task is not None and \
6058 uninst_task in scheduled_uninstalls):
6059 # Include satisfied blockers in the merge list
6060 # since the user might be interested and also
6061 # it serves as an indicator that blocking packages
6062 # will be temporarily installed simultaneously.
6063 for blocker in solved_blockers:
6064 retlist.append(blocker)
6066 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
6067 for node in myblocker_uninstalls.root_nodes():
6068 unsolvable_blockers.add(node)
6070 # If any Uninstall tasks need to be executed in order
6071 # to avoid a conflict, complete the graph with any
6072 # dependencies that may have been initially
6073 # neglected (to ensure that unsafe Uninstall tasks
6074 # are properly identified and blocked from execution).
6075 if have_uninstall_task and \
6077 not unsolvable_blockers:
6078 self._dynamic_config.myparams["complete"] = True
6079 if '--debug' in self._frozen_config.myopts:
6081 msg.append("enabling 'complete' depgraph mode " + \
6082 "due to uninstall task(s):")
6084 for node in retlist:
6085 if isinstance(node, Package) and \
6086 node.operation == 'uninstall':
6087 msg.append("\t%s" % (node,))
6088 writemsg_level("\n%s\n" % \
6089 "".join("%s\n" % line for line in msg),
6090 level=logging.DEBUG, noiselevel=-1)
6091 raise self._serialize_tasks_retry("")
6093 # Set satisfied state on blockers, but not before the
6094 # above retry path, since we don't want to modify the
6095 # state in that case.
6096 for node in retlist:
6097 if isinstance(node, Blocker):
6098 node.satisfied = True
6100 for blocker in unsolvable_blockers:
6101 retlist.append(blocker)
6103 if unsolvable_blockers and \
6104 not self._accept_blocker_conflicts():
6105 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
6106 self._dynamic_config._serialized_tasks_cache = retlist[:]
6107 self._dynamic_config._scheduler_graph = scheduler_graph
6108 self._dynamic_config._skip_restart = True
6109 raise self._unknown_internal_error()
6111 if self._dynamic_config._slot_collision_info and \
6112 not self._accept_blocker_conflicts():
6113 self._dynamic_config._serialized_tasks_cache = retlist[:]
6114 self._dynamic_config._scheduler_graph = scheduler_graph
6115 raise self._unknown_internal_error()
6117 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Display a circular-dependency error for *mygraph*.

    Creates a circular_dependency_handler for the graph, forces the
    most informative display options (--verbose/--tree, no --quiet),
    shows the merge list, then prints the cycle information and any
    suggested USE-flag changes that might break the cycle.
    """
    self._dynamic_config._circular_dependency_handler = \
        circular_dependency_handler(self, mygraph)
    handler = self._dynamic_config._circular_dependency_handler

    # Force maximally informative output for the error report.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts["--verbose"] = True
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(handler.merge_list)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)

    # Fall back to the handler's debug output when no human-readable
    # cycle message was produced.
    if handler.circular_dep_message is None:
        handler.debug_print()
        portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is not None:
        portage.writemsg(handler.circular_dep_message, noiselevel=-1)

    # Suggested USE-flag changes that might break the cycle.
    suggestions = handler.suggestions
        writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
        if len(suggestions) == 1:
            writemsg("by applying the following change:\n", noiselevel=-1)
            writemsg("by applying " + colorize("bold", "any of") + \
                " the following changes:\n", noiselevel=-1)
        writemsg("".join(suggestions), noiselevel=-1)
        writemsg("\nNote that this change can be reverted, once the package has" + \
            " been installed.\n", noiselevel=-1)
        if handler.large_cycle_count:
            writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
                "Several changes might be required to resolve all cycles.\n" + \
                "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)

        # Generic advice shown when no specific suggestion is available.
        writemsg("\n\n", noiselevel=-1)
        writemsg(prefix + "Note that circular dependencies " + \
            "can often be avoided by temporarily\n", noiselevel=-1)
        writemsg(prefix + "disabling USE flags that trigger " + \
            "optional dependencies.\n", noiselevel=-1)
6164 def _show_merge_list(self):
6165 if self._dynamic_config._serialized_tasks_cache is not None and \
6166 not (self._dynamic_config._displayed_list is not None and \
6167 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
6168 self._dynamic_config._displayed_list == \
6169 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
6170 display_list = self._dynamic_config._serialized_tasks_cache[:]
6171 if "--tree" in self._frozen_config.myopts:
6172 display_list.reverse()
6173 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Display an error for blocker conflicts that could not be resolved:
    shows the merge list, then for each blocker lists the conflicting
    packages together with the parent packages/atoms that pulled them in.
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in textwrap.wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                # Packages blocked only via the world/selected set get
                # a synthetic "@selected" parent atom.
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@selected", atom)])
                conflict_pkgs[pkg] = parent_atoms

        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        for pkg, parent_atoms in conflict_pkgs.items():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

        for pkg, parent_atoms in conflict_pkgs.items():

            # Prefer packages that are not directly involved in a conflict.
            # It can be essential to see all the packages here, so don't
            # omit any. If the list is long, people can simply use a pager.
            preferred_parents = set()
            for parent_atom in parent_atoms:
                parent, atom = parent_atom
                if parent not in conflict_pkgs:
                    preferred_parents.add(parent_atom)

            # Show preferred (non-conflicting) parents first.
            ordered_list = list(preferred_parents)
            if len(parent_atoms) > len(ordered_list):
                for parent_atom in parent_atoms:
                    if parent_atom not in preferred_parents:
                        ordered_list.append(parent_atom)

            msg.append(indent + "%s pulled in by\n" % pkg)

            for parent_atom in ordered_list:
                parent, atom = parent_atom
                msg.append(2*indent)
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                    # Display the specific atom from SetArg or
                    msg.append("%s required by %s" % (atom, parent))

        writemsg("".join(msg), noiselevel=-1)

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
    """
    Display the given merge list, delegating the actual rendering to
    the module-level display() function.

    @param mylist: the merge list to display
    @param favorites: optional list of favorite atoms (defaults to an
        empty list); previously a mutable default argument ([]), which
        is a well-known Python pitfall since the same list object would
        be shared across all calls
    @param verbosity: optional verbosity override passed through to the
        module-level display()
    """
    if favorites is None:
        favorites = []

    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist

    return display(self, mylist, favorites, verbosity)
def _display_autounmask(self):
    """
    Display --autounmask message and optionally write it to config files
    (using CONFIG_PROTECT). The message includes the comments and the changes.
    """

    # --autounmask-write/--autounmask-unrestricted-atoms are stored as
    # True (boolean) when enabled, hence the "== True" comparisons.
    autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
    autounmask_unrestricted_atoms = \
        self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
    quiet = "--quiet" in self._frozen_config.myopts
    pretend = "--pretend" in self._frozen_config.myopts
    ask = "--ask" in self._frozen_config.myopts
    enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts

    def check_if_latest(pkg):
        # Returns (is_latest, is_latest_in_slot) for pkg, scanning all
        # configured package dbs for higher-versioned matches.
        # NOTE(review): is_latest is returned below but its
        # initialization is not visible here — confirm.
        is_latest_in_slot = True
        dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
        root_config = self._frozen_config.roots[pkg.root]

        for db, pkg_type, built, installed, db_keys in dbs:
            for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
                if other_pkg.cp != pkg.cp:
                    # old-style PROVIDE virtual means there are no
                    # normal matches for this pkg_type
                    if other_pkg.slot_atom == pkg.slot_atom:
                        is_latest_in_slot = False
                    # iter_match_pkgs yields highest version first, so
                    # there's no need to search this pkg_type any further

            if not is_latest_in_slot:

        return is_latest, is_latest_in_slot

    #Set of roots we have autounmask changes for.

    # Collect per-root keyword-change entries for packages that need
    # unstable keywords accepted.
    masked_by_missing_keywords = False
    unstable_keyword_msg = {}
    for pkg in self._dynamic_config._needed_unstable_keywords:
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            unstable_keyword_msg.setdefault(root, [])
            is_latest, is_latest_in_slot = check_if_latest(pkg)
            pkgsettings = self._frozen_config.pkgsettings[pkg.root]
            mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
                use=self._pkg_use_enabled(pkg))
            for reason in mreasons:
                if reason.unmask_hint and \
                    reason.unmask_hint.key == 'unstable keyword':
                    keyword = reason.unmask_hint.value
                        masked_by_missing_keywords = True

                    unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
                    if autounmask_unrestricted_atoms:
                            unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
                        elif is_latest_in_slot:
                            unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
                            unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
                        unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))

    # Collect per-root package.unmask entries, including the original
    # package.mask comment for context.
    p_mask_change_msg = {}
    for pkg in self._dynamic_config._needed_p_mask_changes:
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            p_mask_change_msg.setdefault(root, [])
            is_latest, is_latest_in_slot = check_if_latest(pkg)
            pkgsettings = self._frozen_config.pkgsettings[pkg.root]
            mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
                use=self._pkg_use_enabled(pkg))
            for reason in mreasons:
                if reason.unmask_hint and \
                    reason.unmask_hint.key == 'p_mask':
                    keyword = reason.unmask_hint.value

                    comment, filename = portage.getmaskingreason(
                        pkg.cpv, metadata=pkg.metadata,
                        settings=pkgsettings,
                        portdb=pkg.root_config.trees["porttree"].dbapi,
                        return_location=True)

                    p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
                        p_mask_change_msg[root].append("# %s:\n" % filename)
                        # Drop blank lines from the mask comment.
                        comment = [line for line in
                            comment.splitlines() if line]
                        for line in comment:
                            p_mask_change_msg[root].append("%s\n" % line)
                    if autounmask_unrestricted_atoms:
                            p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
                        elif is_latest_in_slot:
                            p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
                            p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
                        p_mask_change_msg[root].append("=%s\n" % pkg.cpv)

    # Collect per-root package.use entries for needed USE changes.
    use_changes_msg = {}
    for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            use_changes_msg.setdefault(root, [])
            is_latest, is_latest_in_slot = check_if_latest(pkg)
            changes = needed_use_config_change[1]
            for flag, state in changes.items():
                    adjustments.append(flag)
                    adjustments.append("-" + flag)
            use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
                use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
            elif is_latest_in_slot:
                use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
                use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))

    # Collect per-root package.license entries for missing licenses.
    for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
        self._show_merge_list()
        if pkg in self._dynamic_config.digraph:
            license_msg.setdefault(root, [])
            is_latest, is_latest_in_slot = check_if_latest(pkg)

            license_msg[root].append(self._get_dep_chain_as_comment(pkg))
                license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
            elif is_latest_in_slot:
                license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
                license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))

    def find_config_file(abs_user_config, file_name):
        """
        Searches /etc/portage for an appropriate file to append changes to.
        If the file_name is a file it is returned, if it is a directory, the
        last file in it is returned. Order of traversal is the identical to
        portage.util.grablines(recursive=True).

        file_name - String containing a file name like "package.use"
        return value - String. Absolute path of file to write to. None if
        no suitable file exists.
        """
        file_path = os.path.join(abs_user_config, file_name)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # The file doesn't exist, so we'll
            # Disk or file system trouble?
        last_file_path = None
                if stat.S_ISREG(st.st_mode):
                elif stat.S_ISDIR(st.st_mode):
                    if os.path.basename(p) in _ignorecvs_dirs:
                        contents = os.listdir(p)
                        # Reverse-sorted so the last (highest-sorting)
                        # regular file encountered wins.
                        contents.sort(reverse=True)
                        for child in contents:
                            if child.startswith(".") or \
                                child.endswith("~"):
                            stack.append(os.path.join(p, child))
        return last_file_path

    write_to_file = autounmask_write and not pretend
    #Make sure we have a file to write to before doing any write.
    file_to_write_to = {}
        settings = self._frozen_config.roots[root].settings
        abs_user_config = os.path.join(
            settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)

        if root in unstable_keyword_msg:
            # Prefer package.accept_keywords unless a legacy
            # package.keywords already exists.
            if not os.path.exists(os.path.join(abs_user_config,
                "package.keywords")):
                filename = "package.accept_keywords"
                filename = "package.keywords"
            file_to_write_to[(abs_user_config, "package.keywords")] = \
                find_config_file(abs_user_config, filename)

        if root in p_mask_change_msg:
            file_to_write_to[(abs_user_config, "package.unmask")] = \
                find_config_file(abs_user_config, "package.unmask")

        if root in use_changes_msg:
            file_to_write_to[(abs_user_config, "package.use")] = \
                find_config_file(abs_user_config, "package.use")

        if root in license_msg:
            file_to_write_to[(abs_user_config, "package.license")] = \
                find_config_file(abs_user_config, "package.license")

    for (abs_user_config, f), path in file_to_write_to.items():
            problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))

    write_to_file = not problems

    def format_msg(lines):
        # Highlight comment lines for terminal display.
        for i, line in enumerate(lines):
            if line.startswith("#"):
                lines[i] = colorize("INFORM", line.rstrip()) + "\n"
        return "".join(lines)

    # Display the collected changes, grouped per root.
        settings = self._frozen_config.roots[root].settings
        abs_user_config = os.path.join(
            settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)

            writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)

        if root in unstable_keyword_msg:
            writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)

        if root in p_mask_change_msg:
            writemsg("\nThe following " + colorize("BAD", "mask changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)

        if root in use_changes_msg:
            writemsg("\nThe following " + colorize("BAD", "USE changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)

        if root in license_msg:
            writemsg("\nThe following " + colorize("BAD", "license changes") + \
                " are necessary to proceed:\n", noiselevel=-1)
            writemsg(format_msg(license_msg[root]), noiselevel=-1)

            # CONFIG_PROTECT handling for the files we may write.
            settings = self._frozen_config.roots[root].settings
            protect_obj[root] = ConfigProtect(settings["EROOT"], \
                shlex_split(settings.get("CONFIG_PROTECT", "")),
                shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))

    def write_changes(root, changes, file_to_write_to):
        # Append the collected change lines to file_to_write_to,
        # honoring CONFIG_PROTECT; failures are recorded in problems.
        file_contents = None
            file_contents = io.open(
                _unicode_encode(file_to_write_to,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['content'],
                errors='replace').readlines()
        except IOError as e:
            if e.errno == errno.ENOENT:
                problems.append("!!! Failed to read '%s': %s\n" % \
                    (file_to_write_to, e))
        if file_contents is not None:
            file_contents.extend(changes)
            if protect_obj[root].isprotected(file_to_write_to):
                # We want to force new_protect_filename to ensure
                # that the user will see all our changes via
                # dispatch-conf, even if file_to_write_to doesn't
                # exist yet, so we specify force=True.
                file_to_write_to = new_protect_filename(file_to_write_to,
                write_atomic(file_to_write_to, "".join(file_contents))
            except PortageException:
                problems.append("!!! Failed to write '%s'\n" % file_to_write_to)

    if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
            "NOTE: The --autounmask-keep-masks option will prevent emerge",
            " from creating package.unmask or ** keyword changes."
                line = colorize("INFORM", line)
            writemsg(line + "\n", noiselevel=-1)

    if ask and write_to_file and file_to_write_to:
        prompt = "\nWould you like to add these " + \
            "changes to your config files?"
        if userquery(prompt, enter_invalid) == 'No':
            write_to_file = False

    if write_to_file and file_to_write_to:
            settings = self._frozen_config.roots[root].settings
            abs_user_config = os.path.join(
                settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
            ensure_dirs(abs_user_config)

            if root in unstable_keyword_msg:
                write_changes(root, unstable_keyword_msg[root],
                    file_to_write_to.get((abs_user_config, "package.keywords")))

            if root in p_mask_change_msg:
                write_changes(root, p_mask_change_msg[root],
                    file_to_write_to.get((abs_user_config, "package.unmask")))

            if root in use_changes_msg:
                write_changes(root, use_changes_msg[root],
                    file_to_write_to.get((abs_user_config, "package.use")))

            if root in license_msg:
                write_changes(root, license_msg[root],
                    file_to_write_to.get((abs_user_config, "package.license")))

        writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
        writemsg("".join(problems), noiselevel=-1)
    elif write_to_file and roots:
        writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
    elif not pretend and not autounmask_write and roots:
        writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.
    """

    if self._dynamic_config._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._dynamic_config._circular_deps_for_display)

    # The slot conflict display has better noise reduction than
    # the unsatisfied blockers display, so skip unsatisfied blockers
    # display if there are slot conflicts (see bug #385391).
    if self._dynamic_config._slot_collision_info:
        self._show_slot_collision_notice()
    elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._dynamic_config._unsatisfied_blockers_for_display)
        self._show_missed_update()

    self._show_ignored_binaries()

    self._display_autounmask()

    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.

    if self._dynamic_config._missing_args:
        world_problems = False
        if "world" in self._dynamic_config.sets[
            self._frozen_config.target_root].sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
            for arg, atom in self._dynamic_config._missing_args:
                if arg.name in ("selected", "world") and atom in world_set:
                    world_problems = True

            sys.stderr.write("\n!!! Problems have been " + \
                "detected with your world file\n")
            sys.stderr.write("!!! Please run " + \
                green("emaint --check world")+"\n\n")

    if self._dynamic_config._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._dynamic_config._missing_args) + "\n")

    if self._dynamic_config._pprovided_args:
        # Map each (arg, atom) to the set/arg names that pulled it in.
        for arg, atom in self._dynamic_config._pprovided_args:
            if isinstance(arg, SetArg):
                arg_atom = (atom, atom)
                arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:

        msg.append(bad("\nWARNING: "))
        if len(self._dynamic_config._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
            msg.append("A requested package will not be " + \
                "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.items():
                problems_sets.update(refs)
                ref_string = ", ".join(["'%s'" % name for name in refs])
                ref_string = " pulled in by " + ref_string
            msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))

        if "selected" in problems_sets or "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append("  B) Uninstall offending packages (cleans them from world).\n")
            msg.append("  C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))

    # Report updates that are masked by LICENSE changes.
    masked_packages = []
    for pkg in self._dynamic_config._masked_license_updates:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.repo, pkg.metadata, mreasons))
        writemsg("\n" + colorize("BAD", "!!!") + \
            " The following updates are masked by LICENSE changes:\n",
        show_masked_packages(masked_packages)
        writemsg("\n", noiselevel=-1)

    # Report installed packages that are masked.
    masked_packages = []
    for pkg in self._dynamic_config._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        # NOTE(review): passes the bound method itself rather than
        # calling self._pkg_use_enabled(pkg) as the parallel loop
        # above does — verify this is intentional.
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.repo, pkg.metadata, mreasons))
        writemsg("\n" + colorize("BAD", "!!!") + \
            " The following installed packages are masked:\n",
        show_masked_packages(masked_packages)
        writemsg("\n", noiselevel=-1)

    # Finally, replay any queued unsatisfied-dependency reports.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def saveNomergeFavorites(self):
    """Find atoms in favorites that are not in the mergelist and add them
    to the world file if necessary."""
    # These options must never result in world-file modifications.
    for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
        "--oneshot", "--onlydeps", "--pretend"):
        if x in self._frozen_config.myopts:
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    world_set = root_config.sets["selected"]

    world_locked = False
    if hasattr(world_set, "lock"):

    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk

    args_set = self._dynamic_config.sets[
        self._frozen_config.target_root].sets['__non_set_args__']
    added_favorites = set()
    # Collect world atoms for installed (nomerge) nodes on this root.
    for x in self._dynamic_config._set_nodes:
        if x.operation != "nomerge":

        if x.root != root_config.root:

            myfavkey = create_world_atom(x, args_set, root_config)
                if myfavkey in added_favorites:
                added_favorites.add(myfavkey)
        except portage.exception.InvalidDependString as e:
            writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                (x.cpv, e), noiselevel=-1)
            writemsg("!!! see '%s'\n\n" % os.path.join(
                x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)

    # Also record requested sets (other than world/selected themselves)
    # that are candidates for the world_sets file.
    for arg in self._dynamic_config._initial_arg_list:
        if not isinstance(arg, SetArg):

        if arg.root_config.root != root_config.root:

        if k in ("selected", "world") or \
            not root_config.sets[k].world_candidate:

        all_added.append(SETPREFIX + k)
    all_added.extend(added_favorites)
        # Set-prefixed entries go to world_sets rather than world.
        if a.startswith(SETPREFIX):
            filename = "world_sets"
            ">>> Recording %s in \"%s\" favorites file...\n" %
            (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
    world_set.update(all_added)
# Rebuild the dependency graph from a saved resume list (mtimedb "resume").
# NOTE(review): decimated listing -- the embedded line numbers jump, so
# returns, continue statements, try: headers and else: branches are missing
# from this view. Non-comment lines are kept byte-identical.
6842 def _loadResumeCommand(self, resume_data, skip_masked=True,
6845 Add a resume command to the graph and validate it in the process. This
6846 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before using it.
6851 if not isinstance(resume_data, dict):
6854 mergelist = resume_data.get("mergelist")
6855 if not isinstance(mergelist, list):
6858 favorites = resume_data.get("favorites")
6859 if isinstance(favorites, list):
6860 args = self._load_favorites(favorites)
6864 fakedb = self._dynamic_config.mydbapi
6865 serialized_tasks = []
# Each mergelist entry is expected to be [pkg_type, root, cpv, action].
6868 if not (isinstance(x, list) and len(x) == 4):
6870 pkg_type, myroot, pkg_key, action = x
6871 if pkg_type not in self.pkg_tree_map:
6873 if action != "merge":
6875 root_config = self._frozen_config.roots[myroot]
6877 # Use the resume "favorites" list to see if a repo was specified
6879 depgraph_sets = self._dynamic_config.sets[root_config.root]
6881 for atom in depgraph_sets.atoms.getAtoms():
6882 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
6886 atom = "=" + pkg_key
6888 atom = atom + _repo_separator + repo
6891 atom = Atom(atom, allow_repo=True)
6896 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
6897 if not self._pkg_visibility_check(pkg) or \
6898 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
6899 modified_use=self._pkg_use_enabled(pkg)):
6904 # It does not exist or it is corrupt.
6906 # TODO: log these somewhere
6908 raise portage.exception.PackageNotFound(pkg_key)
6910 if "merge" == pkg.operation and \
6911 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
6912 modified_use=self._pkg_use_enabled(pkg)):
6915 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
6917 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6919 self._dynamic_config._unsatisfied_deps_for_display.append(
6920 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6922 fakedb[myroot].cpv_inject(pkg)
6923 serialized_tasks.append(pkg)
6924 self._spinner_update()
6926 if self._dynamic_config._unsatisfied_deps_for_display:
6929 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
6930 self._dynamic_config._serialized_tasks_cache = serialized_tasks
6931 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
6933 self._select_package = self._select_pkg_from_graph
6934 self._dynamic_config.myparams["selective"] = True
6935 # Always traverse deep dependencies in order to account for
6936 # potentially unsatisfied dependencies of installed packages.
6937 # This is necessary for correct --keep-going or --resume operation
6938 # in case a package from a group of circularly dependent packages
6939 # fails. In this case, a package which has recently been installed
6940 # may have an unsatisfied circular dependency (pulled in by
6941 # PDEPEND, for example). So, even though a package is already
6942 # installed, it may not have all of it's dependencies satisfied, so
6943 # it may not be usable. If such a package is in the subgraph of
6944 # deep dependencies of a scheduled build, that build needs to
6945 # be cancelled. In order for this type of situation to be
6946 # recognized, deep traversal of dependencies is required.
6947 self._dynamic_config.myparams["deep"] = True
6949 for task in serialized_tasks:
6950 if isinstance(task, Package) and \
6951 task.operation == "merge":
6952 if not self._add_pkg(task, None):
6955 # Packages for argument atoms need to be explicitly
6956 # added via _add_pkg() so that they are included in the
6957 # digraph (needed at least for --tree display).
6958 for arg in self._expand_set_args(args, add_to_digraph=True):
6959 for atom in arg.pset.getAtoms():
6960 pkg, existing_node = self._select_package(
6961 arg.root_config.root, atom)
6962 if existing_node is None and \
6964 if not self._add_pkg(pkg, Dependency(atom=atom,
6965 root=pkg.root, parent=arg)):
6968 # Allow unsatisfied deps here to avoid showing a masking
6969 # message for an unsatisfied dep that isn't necessarily
6971 if not self._create_graph(allow_unsatisfied=True):
6974 unsatisfied_deps = []
6975 for dep in self._dynamic_config._unsatisfied_deps:
6976 if not isinstance(dep.parent, Package):
6978 if dep.parent.operation == "merge":
6979 unsatisfied_deps.append(dep)
6982 # For unsatisfied deps of installed packages, only account for
6983 # them if they are in the subgraph of dependencies of a package
6984 # which is scheduled to be installed.
6985 unsatisfied_install = False
6987 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
6989 node = dep_stack.pop()
6990 if not isinstance(node, Package):
6992 if node.operation == "merge":
6993 unsatisfied_install = True
6995 if node in traversed:
6998 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
7000 if unsatisfied_install:
7001 unsatisfied_deps.append(dep)
7003 if masked_tasks or unsatisfied_deps:
7004 # This probably means that a required package
7005 # was dropped via --skipfirst. It makes the
7006 # resume list invalid, so convert it to a
7007 # UnsatisfiedResumeDep exception.
7008 raise self.UnsatisfiedResumeDep(self,
7009 masked_tasks + unsatisfied_deps)
7010 self._dynamic_config._serialized_tasks_cache = None
7013 except self._unknown_internal_error:
# Rebuild DependencyArg instances (SetArg/AtomArg) from a saved favorites
# list so resumed Package instances can be matched with arguments.
# NOTE(review): decimated listing -- e.g. the "args = []" initializer, the
# loop header over favorites and several continue lines are missing.
# Non-comment lines are kept byte-identical.
7018 def _load_favorites(self, favorites):
7020 Use a list of favorites to resume state from a
7021 previous select_files() call. This creates similar
7022 DependencyArg instances to those that would have
7023 been created by the original select_files() call.
7024 This allows Package instances to be matched with
7025 DependencyArg instances during graph creation.
7027 root_config = self._frozen_config.roots[self._frozen_config.target_root]
7028 sets = root_config.sets
7029 depgraph_sets = self._dynamic_config.sets[root_config.root]
# basestring: this file targets Python 2 compatibility (see __future__ import).
7032 if not isinstance(x, basestring):
7034 if x in ("system", "world"):
# Entries that start with the set prefix (e.g. "@foo") become SetArgs.
7036 if x.startswith(SETPREFIX):
7037 s = x[len(SETPREFIX):]
7040 if s in depgraph_sets.sets:
7043 depgraph_sets.sets[s] = pset
7044 args.append(SetArg(arg=x, pset=pset,
7045 root_config=root_config))
# Anything else is treated as a package atom (repo-qualified atoms allowed).
7048 x = Atom(x, allow_repo=True)
7049 except portage.exception.InvalidAtom:
7051 args.append(AtomArg(arg=x, atom=x,
7052 root_config=root_config))
7054 self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """

    def __init__(self, depgraph, value):
        # Keep a reference to the depgraph that detected the problem so the
        # caller can inspect it (see _resume_depgraph's exception handler).
        self.depgraph = depgraph
        portage.exception.PortageException.__init__(self, value)
class _internal_exception(portage.exception.PortageException):
    """Common base for exceptions the depgraph uses for internal control flow."""

    def __init__(self, value=""):
        # Default the message to an empty string; internal users rarely set one.
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr; unfortunately, the exact reason for the failure
    is not known here.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
class _backtrack_mask(_internal_exception):
    """
    This is raised by _show_unsatisfied_dep() when it's called with
    check_backtrack=True and a matching package has been masked by
    backtracking.
    """
class _autounmask_breakage(_internal_exception):
    """
    This is raised by _show_unsatisfied_dep() when it's called with
    check_autounmask_breakage=True and a matching package has been
    disqualified due to autounmask changes.
    """
def need_restart(self):
    """Return whether a depgraph restart was requested and not suppressed."""
    restart_requested = self._dynamic_config._need_restart
    restart_suppressed = self._dynamic_config._skip_restart
    return restart_requested and not restart_suppressed
def success_without_autounmask(self):
    """Return the dynamic config's _success_without_autounmask flag."""
    flag = self._dynamic_config._success_without_autounmask
    return flag
# Re-run the queued unsatisfied-dep displays in check mode to detect
# whether a match was disqualified by autounmask changes.
# NOTE(review): decimated listing -- the try: header before the loop and the
# return statements in the except/else paths are missing from this view.
7108 def autounmask_breakage_detected(self):
7110 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
7111 self._show_unsatisfied_dep(
7112 *pargs, check_autounmask_breakage=True, **kwargs)
# _autounmask_breakage being raised signals that breakage was detected.
7113 except self._autounmask_breakage:
def get_backtrack_infos(self):
    """Expose the backtrack information accumulated in the dynamic config."""
    infos = self._dynamic_config._backtrack_infos
    return infos
# A dbapi-like facade over the depgraph's package selection, used by
# dep_check() to steer atom preference.
# NOTE(review): decimated listing -- the embedded line numbers jump, so
# docstring delimiters, "self._root = root", several return/continue lines
# and some condition lines are missing from this view. Non-comment lines
# are kept byte-identical.
7121 class _dep_check_composite_db(dbapi):
7123 A dbapi-like interface that is optimized for use in dep_check() calls.
7124 This is built on top of the existing depgraph package selection logic.
7125 Some packages that have been added to the graph may be masked from this
7126 view in order to influence the atom preference selection that occurs
7129 def __init__(self, depgraph, root):
7130 dbapi.__init__(self)
7131 self._depgraph = depgraph
# NOTE(review): the assignment of self._root (used throughout below) is one
# of the lines missing from this listing.
7133 self._match_cache = {}
7134 self._cpv_pkg_map = {}
# Drop memoized match() results and the cpv->Package map.
7136 def _clear_cache(self):
7137 self._match_cache.clear()
7138 self._cpv_pkg_map.clear()
7140 def cp_list(self, cp):
7142 Emulate cp_list just so it can be used to check for existence
7143 of new-style virtuals. Since it's a waste of time to return
7144 more than one cpv for this use case, a maximum of one cpv will
7147 if isinstance(cp, Atom):
7152 for pkg in self._depgraph._iter_match_pkgs_any(
7153 self._depgraph._frozen_config.roots[self._root], atom):
# match() results are cached per (atom, unevaluated_atom) key.
7160 def match(self, atom):
7161 cache_key = (atom, atom.unevaluated_atom)
7162 ret = self._match_cache.get(cache_key)
7167 pkg, existing = self._depgraph._select_package(self._root, atom)
7169 if pkg is not None and self._visible(pkg):
7170 self._cpv_pkg_map[pkg.cpv] = pkg
# Slotless virtual atoms get special lookahead treatment below.
7173 if pkg is not None and \
7174 atom.slot is None and \
7175 pkg.cp.startswith("virtual/") and \
7176 (("remove" not in self._depgraph._dynamic_config.myparams and
7177 "--update" not in self._depgraph._frozen_config.myopts) or
7179 not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
7180 # For new-style virtual lookahead that occurs inside dep_check()
7181 # for bug #141118, examine all slots. This is needed so that newer
7182 # slots will not unnecessarily be pulled in when a satisfying lower
7183 # slot is already installed. For example, if virtual/jdk-1.5 is
7184 # satisfied via gcj-jdk then there's no need to pull in a newer
7185 # slot to satisfy a virtual/jdk dependency, unless --update is
7189 for virt_pkg in self._depgraph._iter_match_pkgs_any(
7190 self._depgraph._frozen_config.roots[self._root], atom):
7191 if virt_pkg.cp != pkg.cp:
7193 slots.add(virt_pkg.slot)
7195 slots.remove(pkg.slot)
7197 slot_atom = atom.with_slot(slots.pop())
7198 pkg, existing = self._depgraph._select_package(
7199 self._root, slot_atom)
7202 if not self._visible(pkg):
7204 self._cpv_pkg_map[pkg.cpv] = pkg
7208 self._cpv_sort_ascending(ret)
7210 self._match_cache[cache_key] = ret
# Decide whether a selected package should be exposed through this view.
7213 def _visible(self, pkg):
7214 if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
7216 if pkg.installed and \
7217 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
7218 # Account for packages with masks (like KEYWORDS masks)
7219 # that are usually ignored in visibility checks for
7220 # installed packages, in order to handle cases like
7222 myopts = self._depgraph._frozen_config.myopts
7223 use_ebuild_visibility = myopts.get(
7224 '--use-ebuild-visibility', 'n') != 'n'
7225 avoid_update = "--update" not in myopts and \
7226 "remove" not in self._depgraph._dynamic_config.myparams
7227 usepkgonly = "--usepkgonly" in myopts
7228 if not avoid_update:
7229 if not use_ebuild_visibility and usepkgonly:
7231 elif not self._depgraph._equiv_ebuild_visible(pkg):
7234 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
7235 self._root].get(pkg.slot_atom)
7236 if in_graph is None:
7237 # Mask choices for packages which are not the highest visible
7238 # version within their slot (since they usually trigger slot
7240 highest_visible, in_graph = self._depgraph._select_package(
7241 self._root, pkg.slot_atom)
7242 # Note: highest_visible is not necessarily the real highest
7243 # visible, especially when --update is not enabled, so use
7244 # < operator instead of !=.
7245 if highest_visible is not None and pkg < highest_visible:
7247 elif in_graph != pkg:
7248 # Mask choices for packages that would trigger a slot
7249 # conflict with a previously selected package.
# aux_get serves metadata only for cpvs previously returned by match().
7253 def aux_get(self, cpv, wants):
7254 metadata = self._cpv_pkg_map[cpv].metadata
7255 return [metadata.get(x, "") for x in wants]
7257 def match_pkgs(self, atom):
7258 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
# Report an ambiguous short ebuild name and list the fully-qualified
# candidates; in non-quiet mode a search is performed per candidate.
# NOTE(review): decimated listing -- the docstring/initial lines, the
# return after the quiet branch and the per-candidate search output lines
# are missing from this view. Non-comment lines are kept byte-identical.
7260 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
7262 if "--quiet" in myopts:
7263 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7264 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
7265 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7266 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
7269 s = search(root_config, spinner, "--searchdesc" in myopts,
7270 "--quiet" not in myopts, "--usepkg" in myopts,
7271 "--usepkgonly" in myopts)
7272 null_cp = portage.dep_getkey(insert_category_into_atom(
7274 cat, atom_pn = portage.catsplit(null_cp)
7275 s.searchkey = atom_pn
7276 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
7279 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
7280 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
# Print the pretend/ask banner and start (or silence) the spinner.
# NOTE(review): decimated listing -- the early "if spinner is None: return",
# the assignments of the "action" string and several else: lines are missing
# from this view. Non-comment lines are kept byte-identical.
7282 def _spinner_start(spinner, myopts):
7285 if "--quiet" not in myopts and \
7286 ("--pretend" in myopts or "--ask" in myopts or \
7287 "--tree" in myopts or "--verbose" in myopts):
# The banner wording depends on which fetch/build option is active.
7289 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7291 elif "--buildpkgonly" in myopts:
7295 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
7296 if "--unordered-display" in myopts:
7297 portage.writemsg_stdout("\n" + \
7298 darkgreen("These are the packages that " + \
7299 "would be %s:" % action) + "\n\n")
7301 portage.writemsg_stdout("\n" + \
7302 darkgreen("These are the packages that " + \
7303 "would be %s, in reverse order:" % action) + "\n\n")
7305 portage.writemsg_stdout("\n" + \
7306 darkgreen("These are the packages that " + \
7307 "would be %s, in order:" % action) + "\n\n")
7309 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
7310 if not show_spinner:
7311 spinner.update = spinner.update_quiet
7314 portage.writemsg_stdout("Calculating dependencies ")
# Finish the spinner output started by _spinner_start().
# NOTE(review): decimated listing -- the return after the quiet check and
# one intermediate line are missing. Non-comment lines kept byte-identical.
7316 def _spinner_stop(spinner):
7317 if spinner is None or \
7318 spinner.update == spinner.update_quiet:
7321 if spinner.update != spinner.update_basic:
7322 # update_basic is used for non-tty output,
7323 # so don't output backspaces in that case.
7324 portage.writemsg_stdout("\b\b")
7326 portage.writemsg_stdout("... done!\n")
# Public wrapper: run _backtrack_depgraph with spinner start/stop around it.
# NOTE(review): decimated listing -- the docstring delimiters and the
# try:/finally: lines bracketing the call are missing from this view.
7328 def backtrack_depgraph(settings, trees, myopts, myparams,
7329 myaction, myfiles, spinner):
7331 Raises PackageSetNotFound if myfiles contains a missing package set.
7333 _spinner_start(spinner, myopts)
7335 return _backtrack_depgraph(settings, trees, myopts, myparams,
7336 myaction, myfiles, spinner)
# Presumably inside a finally: block so the spinner is always stopped.
7338 _spinner_stop(spinner)
# Repeatedly build the depgraph, feeding failures back into the Backtracker,
# until success or the retry budget is exhausted.
# NOTE(review): decimated listing -- the loop header, break statements,
# "backtracked += 1" style counters and writemsg_level call lines are
# missing from this view. Non-comment lines are kept byte-identical.
7341 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
7343 debug = "--debug" in myopts
7345 max_retries = myopts.get('--backtrack', 10)
# NOTE(review): "(max_retries + 1) / 2" is true division under Python 3
# (this file imports only print_function from __future__), which would make
# max_depth a float; presumably floor division was intended -- verify
# against upstream before changing.
7346 max_depth = max(1, (max_retries + 1) / 2)
7347 allow_backtracking = max_retries > 0
7348 backtracker = Backtracker(max_depth)
7351 frozen_config = _frozen_depgraph_config(settings, trees,
7356 if debug and mydepgraph is not None:
7358 "\n\nbacktracking try %s \n\n" % \
7359 backtracked, noiselevel=-1, level=logging.DEBUG)
7360 mydepgraph.display_problems()
7362 backtrack_parameters = backtracker.get()
7364 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7365 frozen_config=frozen_config,
7366 allow_backtracking=allow_backtracking,
7367 backtrack_parameters=backtrack_parameters)
7368 success, favorites = mydepgraph.select_files(myfiles)
7370 if success or mydepgraph.success_without_autounmask():
7372 elif not allow_backtracking:
7374 elif backtracked >= max_retries:
7376 elif mydepgraph.need_restart():
7378 backtracker.feedback(mydepgraph.get_backtrack_infos())
# If backtracking failed, retry once with the best run and no backtracking.
7382 if not (success or mydepgraph.success_without_autounmask()) and backtracked:
7386 "\n\nbacktracking aborted after %s tries\n\n" % \
7387 backtracked, noiselevel=-1, level=logging.DEBUG)
7388 mydepgraph.display_problems()
7390 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7391 frozen_config=frozen_config,
7392 allow_backtracking=False,
7393 backtrack_parameters=backtracker.get_best_run())
7394 success, favorites = mydepgraph.select_files(myfiles)
# Last resort: disable autounmask entirely and try once more.
7396 if not success and mydepgraph.autounmask_breakage_detected():
7399 "\n\nautounmask breakage detected\n\n",
7400 noiselevel=-1, level=logging.DEBUG)
7401 mydepgraph.display_problems()
7402 myopts["--autounmask"] = "n"
7403 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7404 frozen_config=frozen_config, allow_backtracking=False)
7405 success, favorites = mydepgraph.select_files(myfiles)
7407 return (success, mydepgraph, favorites)
# Public wrapper: run _resume_depgraph with spinner start/stop around it.
# NOTE(review): decimated listing -- docstring delimiters, try:/finally:
# and the trailing arguments of the inner call are missing from this view.
7410 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7412 Raises PackageSetNotFound if myfiles contains a missing package set.
7414 _spinner_start(spinner, myopts)
7416 return _resume_depgraph(settings, trees, mtimedb, myopts,
# Presumably inside a finally: block so the spinner is always stopped.
7419 _spinner_stop(spinner)
# Build a depgraph from the resume list, pruning unsatisfiable entries
# (collected in dropped_tasks) when skip_unsatisfied is enabled.
# NOTE(review): decimated listing -- the retry loop header, the try: before
# _loadResumeCommand, several continue/raise lines and the final loop bounds
# are missing from this view. Non-comment lines are kept byte-identical.
7421 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7423 Construct a depgraph for the given resume list. This will raise
7424 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
7425 TODO: Return reasons for dropped_tasks, for display/logging.
7427 @return: (success, depgraph, dropped_tasks)
7430 skip_unsatisfied = True
7431 mergelist = mtimedb["resume"]["mergelist"]
7432 dropped_tasks = set()
7433 frozen_config = _frozen_depgraph_config(settings, trees,
7436 mydepgraph = depgraph(settings, trees,
7437 myopts, myparams, spinner, frozen_config=frozen_config)
7439 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
7440 skip_masked=skip_masked)
7441 except depgraph.UnsatisfiedResumeDep as e:
7442 if not skip_unsatisfied:
7445 graph = mydepgraph._dynamic_config.digraph
7446 unsatisfied_parents = dict((dep.parent, dep.parent) \
7448 traversed_nodes = set()
7449 unsatisfied_stack = list(unsatisfied_parents)
# Walk ancestors of each unsatisfied parent; they must be dropped too.
7450 while unsatisfied_stack:
7451 pkg = unsatisfied_stack.pop()
7452 if pkg in traversed_nodes:
7454 traversed_nodes.add(pkg)
7456 # If this package was pulled in by a parent
7457 # package scheduled for merge, removing this
7458 # package may cause the parent package's
7459 # dependency to become unsatisfied.
7460 for parent_node in graph.parent_nodes(pkg):
7461 if not isinstance(parent_node, Package) \
7462 or parent_node.operation not in ("merge", "nomerge"):
7464 # We need to traverse all priorities here, in order to
7465 # ensure that a package with an unsatisfied dependency
7466 # won't get pulled in, even indirectly via a soft
7468 unsatisfied_parents[parent_node] = parent_node
7469 unsatisfied_stack.append(parent_node)
7471 unsatisfied_tuples = frozenset(tuple(parent_node)
7472 for parent_node in unsatisfied_parents
7473 if isinstance(parent_node, Package))
7474 pruned_mergelist = []
7476 if isinstance(x, list) and \
7477 tuple(x) not in unsatisfied_tuples:
7478 pruned_mergelist.append(x)
7480 # If the mergelist doesn't shrink then this loop is infinite.
7481 if len(pruned_mergelist) == len(mergelist):
7482 # This happens if a package can't be dropped because
7483 # it's already installed, but it has unsatisfied PDEPEND.
7485 mergelist[:] = pruned_mergelist
7487 # Exclude installed packages that have been removed from the graph due
7488 # to failure to build/install runtime dependencies after the dependent
7489 # package has already been installed.
7490 dropped_tasks.update(pkg for pkg in \
7491 unsatisfied_parents if pkg.operation != "nomerge")
7493 del e, graph, traversed_nodes, \
7494 unsatisfied_parents, unsatisfied_stack
7498 return (success, mydepgraph, dropped_tasks)
# Fetch metadata for cpv from db and compute its masking reasons.
# NOTE(review): decimated listing -- the try: around aux_get, the except
# setting metadata to None, and else: lines are missing from this view.
# Non-comment lines are kept byte-identical.
7500 def get_mask_info(root_config, cpv, pkgsettings,
7501 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
7503 metadata = dict(zip(db_keys,
7504 db.aux_get(cpv, db_keys, myrepo=myrepo)))
# metadata is None when aux_get failed (presumably set in a missing except).
7508 if metadata is None:
7509 mreasons = ["corruption"]
7511 eapi = metadata['EAPI']
7512 if not portage.eapi_is_supported(eapi):
7513 mreasons = ['EAPI %s' % eapi]
7515 pkg = Package(type_name=pkg_type, root_config=root_config,
7516 cpv=cpv, built=built, installed=installed, metadata=metadata)
7519 if _pkg_use_enabled is not None:
7520 modified_use = _pkg_use_enabled(pkg)
7522 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
7524 return metadata, mreasons
# Print each masked package with its reasons, mask-file comments and
# license locations; returns whether any package was masked by EAPI.
# NOTE(review): decimated listing -- the shown_cpvs initializer, several
# continue lines and condition/else lines are missing from this view.
# Non-comment lines are kept byte-identical.
7526 def show_masked_packages(masked_packages):
7527 shown_licenses = set()
7528 shown_comments = set()
7529 # Maybe there is both an ebuild and a binary. Only
7530 # show one of them to avoid redundant appearance.
7532 have_eapi_mask = False
7533 for (root_config, pkgsettings, cpv, repo,
7534 metadata, mreasons) in masked_packages:
7537 output_cpv += _repo_separator + repo
7538 if output_cpv in shown_cpvs:
7540 shown_cpvs.add(output_cpv)
7541 eapi_masked = metadata is not None and \
7542 not portage.eapi_is_supported(metadata["EAPI"])
7544 have_eapi_mask = True
7545 # When masked by EAPI, metadata is mostly useless since
7546 # it doesn't contain essential things like SLOT.
7548 comment, filename = None, None
# For package.mask entries, pull the mask-file comment for display.
7549 if not eapi_masked and \
7550 "package.mask" in mreasons:
7551 comment, filename = \
7552 portage.getmaskingreason(
7553 cpv, metadata=metadata,
7554 settings=pkgsettings,
7555 portdb=root_config.trees["porttree"].dbapi,
7556 return_location=True)
7557 missing_licenses = []
7558 if not eapi_masked and metadata is not None:
7560 missing_licenses = \
7561 pkgsettings._getMissingLicenses(
7563 except portage.exception.InvalidDependString:
7564 # This will have already been reported
7565 # above via mreasons.
7568 writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
7571 if comment and comment not in shown_comments:
7572 writemsg(filename + ":\n" + comment + "\n",
7574 shown_comments.add(comment)
7575 portdb = root_config.trees["porttree"].dbapi
7576 for l in missing_licenses:
7577 l_path = portdb.findLicensePath(l)
7578 if l in shown_licenses:
7580 msg = ("A copy of the '%s' license" + \
7581 " is located at '%s'.\n\n") % (l, l_path)
7582 writemsg(msg, noiselevel=-1)
7583 shown_licenses.add(l)
7584 return have_eapi_mask
def show_mask_docs():
    """Point the user at the MASKED PACKAGES documentation."""
    # Two writemsg calls, exactly as before; the strings are unchanged.
    for part in (
            "For more information, see the MASKED PACKAGES "
            "section in the emerge\n",
            "man page or refer to the Gentoo Handbook.\n"):
        writemsg(part, noiselevel=-1)
def show_blocker_docs_link():
    """Print a pointer to the Gentoo Handbook section on blocked packages."""
    writemsg("\nFor more information about %s, please refer to the following\n"
        % bad("Blocked Packages"), noiselevel=-1)
    writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
        noiselevel=-1)
    writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
        noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
    """Return the human-readable messages from _get_masking_status()."""
    reasons = _get_masking_status(pkg, pkgsettings, root_config,
        myrepo=myrepo, use=use)
    return [reason.message for reason in reasons]
7600 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7601 mreasons = _getmaskingstatus(
7602 pkg, settings=pkgsettings,
7603 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7605 if not pkg.installed:
7606 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
7607 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7608 pkg.metadata["CHOST"]))
7611 for msgs in pkg.invalid.values():
7614 _MaskReason("invalid", "invalid: %s" % (msg,)))
7616 if not pkg.metadata["SLOT"]:
7618 _MaskReason("invalid", "SLOT: undefined"))