1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
from __future__ import print_function

import sys
import textwrap

from collections import deque
from itertools import chain

import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
from portage.dbapi import dbapi
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
	check_required_use, human_readable_required_use, _repo_separator
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
from portage.exception import InvalidAtom, InvalidDependString, PortageException
# NOTE(review): the continuation line of this import was lost in the source
# listing; the statement is closed here so the module parses.
from portage.output import colorize, create_color_func
bad = create_color_func("BAD")
from portage.package.ebuild.getmaskingstatus import \
	_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util.listdir import _ignorecvs_dirs
from portage.versions import catpkgsplit

from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
	is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.userquery import userquery

from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display
# Python 2/3 compatibility aliases: on Python 3 the py2 builtins
# basestring/long no longer exist, so alias them to their py3 equivalents.
# NOTE(review): the body of this block was elided in the source listing;
# restored to the conventional shim used throughout this codebase — confirm
# against upstream.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
75 class _scheduler_graph_config(object):
76 def __init__(self, trees, pkg_cache, graph, mergelist):
78 self.pkg_cache = pkg_cache
80 self.mergelist = mergelist
def _wildcard_set(atoms):
	"""Build an InternalPackageSet from atom strings, allowing wildcards.

	Strings that fail to parse as atoms on their own (e.g. a bare package
	name with no category) are retried with a ``*/`` category wildcard
	prefix, so users may pass either full atoms or plain package names.

	NOTE(review): the loop header, ``try:``, ``pkgs.add(x)`` and ``return``
	lines were elided in the source listing (the dangling ``except`` proves
	the ``try``); restored here.

	@param atoms: iterable of atom strings
	@rtype: InternalPackageSet
	@return: set containing one wildcard-capable Atom per input string
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for x in atoms:
		try:
			x = Atom(x, allow_wildcard=True)
		except portage.exception.InvalidAtom:
			x = Atom("*/" + x, allow_wildcard=True)
		pkgs.add(x)
	return pkgs
class _frozen_depgraph_config(object):
	"""Depgraph state that stays constant across backtracking runs and can
	therefore be shared between successive depgraph instances: settings,
	(fake) trees, parsed command-line option sets, and the Package cache.

	NOTE(review): several elided statements (``self.myopts``, the
	``self.trees``/``self.roots``/``self._pkg_cache`` initializers, the
	``for myroot in trees:`` header and the middle ``RootConfig`` argument)
	were reconstructed from visible usage — confirm against upstream.
	"""

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self._running_root = trees[trees._running_eroot]["root_config"]
		# Options under which a merge list does not require a restart.
		self._opts_no_restart = frozenset(["--buildpkgonly",
			"--fetchonly", "--fetch-all-uri", "--pretend"])
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)

		self._required_set_names = set(["world"])

		# Parse the wildcard-capable atom lists given on the command line.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	"""Per-root bookkeeping for package sets pulled into the graph.

	NOTE(review): the ``__init__`` header and the ``self.sets = {}``
	initializer were elided in the source listing; restored here since
	the visible code assigns into ``self.sets``.
	"""
	def __init__(self):
		# contains all sets added to the graph
		self.sets = {}
		# contains non-set atoms given as arguments
		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self.atoms = InternalPackageSet(allow_repo=True)
		# maps each atom back to the DependencyArg(s) it came from
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks build-time dependency edges between built packages in order to
	decide which packages must be rebuilt from source or reinstalled from a
	binary when one of their build dependencies changes (the
	--rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
	options).

	NOTE(review): elided statements (returns, ``reinstall`` flag, the
	``try/except KeyError`` around ``bindb.aux_get``, ``break``,
	``graph.remove(node)`` and the direct-cycle guard) were reconstructed
	from visible usage — confirm against upstream.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""Record a build-time edge from a built parent package to dep_pkg,
		unless either end is excluded/ignored by configuration."""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True

		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False

		return True

	def _trigger_rebuild(self, parent, build_deps):
		"""Return True if parent must be rebuilt from source; otherwise,
		possibly schedule a binary reinstall and return whether one was
		scheduled."""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    present.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Another binary of the same version (ignoring the
					# revision) on the parent's binhost also validates it.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							break

				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					inst_build_time = parent.metadata.get("BUILD_TIME")
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != inst_build_time:
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
class _dynamic_depgraph_config(object):
	"""Per-run depgraph state: everything that changes between backtracking
	attempts (graphs, caches, per-root dbapi views, backtracking bookkeeping).

	NOTE(review): elided statements (``self.mydbapi = {}``, ``self.sets = {}``,
	``self._dep_stack = []``, the ``def graph_tree(): pass`` /
	``def filtered_tree(): pass`` stand-ins, ``dbs = []`` and the ``else:``
	branch) were reconstructed from visible usage — confirm against upstream.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Per-root fake vdb, modeling post-merge state.
		self.mydbapi = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		#contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		self.sets = {}
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This use used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._traverse_ignored_deps = False

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)

			self.mydbapi[myroot] = fakedb
			# Minimal tree-like stand-in object carrying only a dbapi
			# attribute, as expected by dep_check().
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]

			dbs = []
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
487 class depgraph(object):
489 pkg_tree_map = RootConfig.pkg_tree_map
491 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
493 def __init__(self, settings, trees, myopts, myparams, spinner,
494 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
495 if frozen_config is None:
496 frozen_config = _frozen_depgraph_config(settings, trees,
498 self._frozen_config = frozen_config
499 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
500 allow_backtracking, backtrack_parameters)
501 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
503 self._select_atoms = self._select_atoms_highest_available
504 self._select_package = self._select_pkg_highest_available
508 Load installed package metadata if appropriate. This used to be called
509 from the constructor, but that wasn't very nice since this procedure
510 is slow and it generates spinner output. So, now it's called on-demand
511 by various methods when necessary.
514 if self._dynamic_config._vdb_loaded:
517 for myroot in self._frozen_config.trees:
519 dynamic_deps = self._dynamic_config.myparams.get(
520 "dynamic_deps", "y") != "n"
521 preload_installed_pkgs = \
522 "--nodeps" not in self._frozen_config.myopts
524 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
525 if not fake_vartree.dbapi:
526 # This needs to be called for the first depgraph, but not for
527 # backtracking depgraphs that share the same frozen_config.
530 # FakeVartree.sync() populates virtuals, and we want
531 # self.pkgsettings to have them populated too.
532 self._frozen_config.pkgsettings[myroot] = \
533 portage.config(clone=fake_vartree.settings)
535 if preload_installed_pkgs:
536 vardb = fake_vartree.dbapi
537 fakedb = self._dynamic_config._graph_trees[
538 myroot]["vartree"].dbapi
541 self._spinner_update()
543 # This causes FakeVartree to update the
544 # Package instance dependencies via
545 # PackageVirtualDbapi.aux_update()
546 vardb.aux_get(pkg.cpv, [])
547 fakedb.cpv_inject(pkg)
549 self._dynamic_config._vdb_loaded = True
551 def _spinner_update(self):
552 if self._frozen_config.spinner:
553 self._frozen_config.spinner.update()
555 def _show_ignored_binaries(self):
557 Show binaries that have been ignored because their USE didn't
558 match the user's config.
560 if not self._dynamic_config.ignored_binaries \
561 or '--quiet' in self._frozen_config.myopts \
562 or self._dynamic_config.myparams.get(
563 "binpkg_respect_use") in ("y", "n"):
566 for pkg in list(self._dynamic_config.ignored_binaries):
568 selected_pkg = self._dynamic_config.mydbapi[pkg.root
569 ].match_pkgs(pkg.slot_atom)
574 selected_pkg = selected_pkg[-1]
575 if selected_pkg > pkg:
576 self._dynamic_config.ignored_binaries.pop(pkg)
579 if selected_pkg.installed and \
580 selected_pkg.cpv == pkg.cpv and \
581 selected_pkg.metadata.get('BUILD_TIME') == \
582 pkg.metadata.get('BUILD_TIME'):
583 # We don't care about ignored binaries when an
584 # identical installed instance is selected to
586 self._dynamic_config.ignored_binaries.pop(pkg)
589 if not self._dynamic_config.ignored_binaries:
592 self._show_merge_list()
594 writemsg("\n!!! The following binary packages have been ignored " + \
595 "due to non matching USE:\n\n", noiselevel=-1)
597 for pkg, flags in self._dynamic_config.ignored_binaries.items():
598 writemsg(" =%s" % pkg.cpv, noiselevel=-1)
600 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
601 writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
606 "NOTE: The --binpkg-respect-use=n option will prevent emerge",
607 " from ignoring these binary packages if possible.",
608 " Using --binpkg-respect-use=y will silence this warning."
613 line = colorize("INFORM", line)
614 writemsg_stdout(line + "\n", noiselevel=-1)
616 def _show_missed_update(self):
618 # In order to minimize noise, show only the highest
619 # missed update from each SLOT.
621 for pkg, mask_reasons in \
622 self._dynamic_config._runtime_pkg_mask.items():
624 # Exclude installed here since we only
625 # want to show available updates.
627 chosen_pkg = self._dynamic_config.mydbapi[pkg.root
628 ].match_pkgs(pkg.slot_atom)
629 if not chosen_pkg or chosen_pkg[-1] >= pkg:
631 k = (pkg.root, pkg.slot_atom)
632 if k in missed_updates:
633 other_pkg, mask_type, parent_atoms = missed_updates[k]
636 for mask_type, parent_atoms in mask_reasons.items():
639 missed_updates[k] = (pkg, mask_type, parent_atoms)
642 if not missed_updates:
645 missed_update_types = {}
646 for pkg, mask_type, parent_atoms in missed_updates.values():
647 missed_update_types.setdefault(mask_type,
648 []).append((pkg, parent_atoms))
650 if '--quiet' in self._frozen_config.myopts and \
651 '--debug' not in self._frozen_config.myopts:
652 missed_update_types.pop("slot conflict", None)
653 missed_update_types.pop("missing dependency", None)
655 self._show_missed_update_slot_conflicts(
656 missed_update_types.get("slot conflict"))
658 self._show_missed_update_unsatisfied_dep(
659 missed_update_types.get("missing dependency"))
661 def _show_missed_update_unsatisfied_dep(self, missed_updates):
663 if not missed_updates:
666 self._show_merge_list()
667 backtrack_masked = []
669 for pkg, parent_atoms in missed_updates:
672 for parent, root, atom in parent_atoms:
673 self._show_unsatisfied_dep(root, atom, myparent=parent,
674 check_backtrack=True)
675 except self._backtrack_mask:
676 # This is displayed below in abbreviated form.
677 backtrack_masked.append((pkg, parent_atoms))
680 writemsg("\n!!! The following update has been skipped " + \
681 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
683 writemsg(str(pkg.slot_atom), noiselevel=-1)
685 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
686 writemsg("\n", noiselevel=-1)
688 for parent, root, atom in parent_atoms:
689 self._show_unsatisfied_dep(root, atom, myparent=parent)
690 writemsg("\n", noiselevel=-1)
693 # These are shown in abbreviated form, in order to avoid terminal
694 # flooding from mask messages as reported in bug #285832.
695 writemsg("\n!!! The following update(s) have been skipped " + \
696 "due to unsatisfied dependencies\n" + \
697 "!!! triggered by backtracking:\n\n", noiselevel=-1)
698 for pkg, parent_atoms in backtrack_masked:
699 writemsg(str(pkg.slot_atom), noiselevel=-1)
701 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
702 writemsg("\n", noiselevel=-1)
704 def _show_missed_update_slot_conflicts(self, missed_updates):
706 if not missed_updates:
709 self._show_merge_list()
711 msg.append("\nWARNING: One or more updates have been " + \
712 "skipped due to a dependency conflict:\n\n")
715 for pkg, parent_atoms in missed_updates:
716 msg.append(str(pkg.slot_atom))
718 msg.append(" for %s" % (pkg.root,))
721 for parent, atom in parent_atoms:
725 msg.append(" conflicts with\n")
727 if isinstance(parent,
728 (PackageArg, AtomArg)):
729 # For PackageArg and AtomArg types, it's
730 # redundant to display the atom attribute.
731 msg.append(str(parent))
733 # Display the specific atom from SetArg or
735 msg.append("%s required by %s" % (atom, parent))
739 writemsg("".join(msg), noiselevel=-1)
741 def _show_slot_collision_notice(self):
742 """Show an informational message advising the user to mask one of the
743 the packages. In some cases it may be possible to resolve this
744 automatically, but support for backtracking (removal nodes that have
745 already been selected) will be required in order to handle all possible
749 if not self._dynamic_config._slot_collision_info:
752 self._show_merge_list()
754 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
755 handler = self._dynamic_config._slot_conflict_handler
757 conflict = handler.get_conflict()
758 writemsg(conflict, noiselevel=-1)
760 explanation = handler.get_explanation()
762 writemsg(explanation, noiselevel=-1)
765 if "--quiet" in self._frozen_config.myopts:
769 msg.append("It may be possible to solve this problem ")
770 msg.append("by using package.mask to prevent one of ")
771 msg.append("those packages from being selected. ")
772 msg.append("However, it is also possible that conflicting ")
773 msg.append("dependencies exist such that they are impossible to ")
774 msg.append("satisfy simultaneously. If such a conflict exists in ")
775 msg.append("the dependencies of two different packages, then those ")
776 msg.append("packages can not be installed simultaneously.")
777 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
778 if not self._dynamic_config._allow_backtracking and \
779 (backtrack_opt is None or \
780 (backtrack_opt > 0 and backtrack_opt < 30)):
781 msg.append(" You may want to try a larger value of the ")
782 msg.append("--backtrack option, such as --backtrack=30, ")
783 msg.append("in order to see if that will solve this conflict ")
784 msg.append("automatically.")
786 for line in textwrap.wrap(''.join(msg), 70):
787 writemsg(line + '\n', noiselevel=-1)
788 writemsg('\n', noiselevel=-1)
791 msg.append("For more information, see MASKED PACKAGES ")
792 msg.append("section in the emerge man page or refer ")
793 msg.append("to the Gentoo Handbook.")
794 for line in textwrap.wrap(''.join(msg), 70):
795 writemsg(line + '\n', noiselevel=-1)
796 writemsg('\n', noiselevel=-1)
798 def _process_slot_conflicts(self):
800 Process slot conflict data to identify specific atoms which
801 lead to conflict. These atoms only match a subset of the
802 packages that have been pulled into a given slot.
804 for (slot_atom, root), slot_nodes \
805 in self._dynamic_config._slot_collision_info.items():
807 all_parent_atoms = set()
808 for pkg in slot_nodes:
809 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
812 all_parent_atoms.update(parent_atoms)
814 for pkg in slot_nodes:
815 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
816 if parent_atoms is None:
818 self._dynamic_config._parent_atoms[pkg] = parent_atoms
819 for parent_atom in all_parent_atoms:
820 if parent_atom in parent_atoms:
822 # Use package set for matching since it will match via
823 # PROVIDE when necessary, while match_from_list does not.
824 parent, atom = parent_atom
825 atom_set = InternalPackageSet(
826 initial_atoms=(atom,), allow_repo=True)
827 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
828 parent_atoms.add(parent_atom)
830 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
832 def _reinstall_for_flags(self, forced_flags,
833 orig_use, orig_iuse, cur_use, cur_iuse):
834 """Return a set of flags that trigger reinstallation, or None if there
835 are no such flags."""
836 if "--newuse" in self._frozen_config.myopts or \
837 self._dynamic_config.myparams.get(
838 "binpkg_respect_use") in ("y", "auto"):
839 flags = set(orig_iuse.symmetric_difference(
840 cur_iuse).difference(forced_flags))
841 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
842 cur_iuse.intersection(cur_use)))
845 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
846 flags = orig_iuse.intersection(orig_use).symmetric_difference(
847 cur_iuse.intersection(cur_use))
852 def _create_graph(self, allow_unsatisfied=False):
853 dep_stack = self._dynamic_config._dep_stack
854 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
855 while dep_stack or dep_disjunctive_stack:
856 self._spinner_update()
858 dep = dep_stack.pop()
859 if isinstance(dep, Package):
860 if not self._add_pkg_deps(dep,
861 allow_unsatisfied=allow_unsatisfied):
864 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
866 if dep_disjunctive_stack:
867 if not self._pop_disjunction(allow_unsatisfied):
871 def _expand_set_args(self, input_args, add_to_digraph=False):
873 Iterate over a list of DependencyArg instances and yield all
874 instances given in the input together with additional SetArg
875 instances that are generated from nested sets.
876 @param input_args: An iterable of DependencyArg instances
877 @type input_args: Iterable
878 @param add_to_digraph: If True then add SetArg instances
879 to the digraph, in order to record parent -> child
880 relationships from nested sets
881 @type add_to_digraph: Boolean
883 @returns: All args given in the input together with additional
884 SetArg instances that are generated from nested sets
887 traversed_set_args = set()
889 for arg in input_args:
890 if not isinstance(arg, SetArg):
894 root_config = arg.root_config
895 depgraph_sets = self._dynamic_config.sets[root_config.root]
898 arg = arg_stack.pop()
899 if arg in traversed_set_args:
901 traversed_set_args.add(arg)
904 self._dynamic_config.digraph.add(arg, None,
905 priority=BlockerDepPriority.instance)
909 # Traverse nested sets and add them to the stack
910 # if they're not already in the graph. Also, graph
911 # edges between parent and nested sets.
912 for token in arg.pset.getNonAtoms():
913 if not token.startswith(SETPREFIX):
915 s = token[len(SETPREFIX):]
916 nested_set = depgraph_sets.sets.get(s)
917 if nested_set is None:
918 nested_set = root_config.sets.get(s)
919 if nested_set is not None:
920 nested_arg = SetArg(arg=token, pset=nested_set,
921 root_config=root_config)
922 arg_stack.append(nested_arg)
924 self._dynamic_config.digraph.add(nested_arg, arg,
925 priority=BlockerDepPriority.instance)
926 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Resolve a single Dependency: select a package satisfying dep.atom
		(or use the caller's pre-selected dep.child) and add it to the
		graph via _add_pkg.  Blockers are only recorded here, not
		resolved.  Callers test the return value for truth (see
		_wrapped_add_pkg_dep_string).
		@param dep: the dependency to resolve
		@type dep: Dependency
		@param allow_unsatisfied: if True, queue unsatisfiable deps in
			_unsatisfied_deps instead of triggering backtracking
		@type allow_unsatisfied: Boolean
		"""
		debug = "--debug" in self._frozen_config.myopts
		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
		nodeps = "--nodeps" in self._frozen_config.myopts
		deep = self._dynamic_config.myparams.get("deep", 0)
		recurse = deep is True or dep.depth <= deep
		# Record blocker deps for the later blocker-conflict phase.
		if not buildpkgonly and \
			not dep.collapsed_priority.ignored and \
			not dep.collapsed_priority.optional and \
			dep.parent not in self._dynamic_config._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.

			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				priority=dep.priority, root=dep.parent.root)
			self._dynamic_config._blocker_parents.add(blocker, dep.parent)

		if dep.child is None:
			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
				onlydeps=dep.onlydeps)

			# The caller has selected a specific package
			# via self._minimize_packages().
			existing_node = self._dynamic_config._slot_pkg_map[
				dep.root].get(dep_pkg.slot_atom)

			# No package satisfies the atom: ignore, queue, or backtrack
			# depending on priority and configuration.
			if (dep.collapsed_priority.optional or
				dep.collapsed_priority.ignored):
				# This is an unnecessary build-time dep.

			if allow_unsatisfied:
				self._dynamic_config._unsatisfied_deps.append(dep)

			self._dynamic_config._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))

			# The parent node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking:
				if dep.parent in self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
						"!!! backtracking loop detected: %s %s\n" % \
						self._dynamic_config._runtime_pkg_mask[
						dep.parent]), noiselevel=-1)
				elif not self.need_restart():
					# Do not backtrack if only USE have to be changed in
					# order to satisfy the dependency.
					dep_pkg, existing_node = \
						self._select_package(dep.root, dep.atom.without_use,
							onlydeps=dep.onlydeps)
						self._dynamic_config._backtrack_infos["missing dependency"] = dep
						self._dynamic_config._need_restart = True
						if "--debug" in self._frozen_config.myopts:
							msg.append("backtracking due to unsatisfied dep:")
							msg.append("  parent: %s" % dep.parent)
							msg.append("  priority: %s" % dep.priority)
							msg.append("  root: %s" % dep.root)
							msg.append("  atom: %s" % dep.atom)
							writemsg_level("".join("%s\n" % l for l in msg),
								noiselevel=-1, level=logging.DEBUG)

		self._rebuild.add(dep_pkg, dep)

		# Ignored deps are only traversed when explicitly requested.
		ignore = dep.collapsed_priority.ignored and \
			not self._dynamic_config._traverse_ignored_deps
		if not ignore and not self._add_pkg(dep_pkg, dep):
	def _check_slot_conflict(self, pkg, atom):
		"""
		Check whether a package already occupies pkg's slot in the graph.
		@returns: tuple (existing_node, matches) where existing_node is
			the package currently mapped to pkg's slot (if any) and
			matches indicates whether it also satisfies the given atom
		"""
		existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			# Identical cpv trivially matches; otherwise match the atom.
			matches = pkg.cpv == existing_node.cpv
			if pkg != existing_node and \
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				matches = bool(InternalPackageSet(initial_atoms=(atom,),
					allow_repo=True).findAtomForPackage(existing_node,
					modified_use=self._pkg_use_enabled(existing_node)))

		return (existing_node, matches)
	def _add_pkg(self, pkg, dep):
		"""
		Adds a package to the depgraph, queues dependencies, and handles
		slot conflicts (including the backtracking bookkeeping they
		require).
		"""
		debug = "--debug" in self._frozen_config.myopts
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()

			# Debug output: show the child and its parent dep.
				"\n%s%s %s\n" % ("Child:".ljust(15), pkg,
				pkg_use_display(pkg, self._frozen_config.myopts,
				modified_use=self._pkg_use_enabled(pkg))),
				level=logging.DEBUG, noiselevel=-1)
			if isinstance(myparent,
				(PackageArg, AtomArg)):
				# For PackageArg and AtomArg types, it's
				# redundant to display the atom attribute.
					"%s%s\n" % ("Parent Dep:".ljust(15), myparent),
					level=logging.DEBUG, noiselevel=-1)

				# Display the specific atom from SetArg or
					"%s%s required by %s\n" %
					("Parent Dep:".ljust(15), dep.atom, myparent),
					level=logging.DEBUG, noiselevel=-1)

		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self._dynamic_config.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]

			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString as e:
			if not pkg.installed:
				# should have been masked before it was selected

		# NOTE: REQUIRED_USE checks are delayed until after
		# package selection, since we want to prompt the user
		# for USE adjustment rather than have REQUIRED_USE
		# affect package selection and || dep choices.
		if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
			eapi_has_required_use(pkg.metadata["EAPI"]):
			required_use_is_sat = check_required_use(
				pkg.metadata["REQUIRED_USE"],
				self._pkg_use_enabled(pkg),
				pkg.iuse.is_valid_flag)
			if not required_use_is_sat:
				if dep.atom is not None and dep.parent is not None:
					self._add_parent_atom(pkg, (dep.parent, dep.atom))

					for parent_atom in arg_atoms:
						parent, atom = parent_atom
						self._add_parent_atom(pkg, parent_atom)

				# Queue an exact-version atom for display of the
				# REQUIRED_USE problem.
				atom = Atom("=" + pkg.cpv)
				self._dynamic_config._unsatisfied_deps_for_display.append(
					((pkg.root, atom), {"myparent":dep.parent}))
				self._dynamic_config._skip_restart = True

		if not pkg.onlydeps:

			existing_node, existing_node_matches = \
				self._check_slot_conflict(pkg, dep.atom)
			slot_collision = False

			if existing_node_matches:
				# The existing node can be reused.
					for parent_atom in arg_atoms:
						parent, atom = parent_atom
						self._dynamic_config.digraph.add(existing_node, parent,
						self._add_parent_atom(existing_node, parent_atom)
				# If a direct circular dependency is not an unsatisfied
				# buildtime dependency then drop it here since otherwise
				# it can skew the merge order calculation in an unwanted
				if existing_node != myparent or \
					(priority.buildtime and not priority.satisfied):
					self._dynamic_config.digraph.addnode(existing_node, myparent,
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(existing_node,
							(dep.parent, dep.atom))

				# A slot conflict has occurred.
				# The existing node should not already be in
				# runtime_pkg_mask, since that would trigger an
				# infinite backtracking loop.
				if self._dynamic_config._allow_backtracking and \
					self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
							"!!! backtracking loop detected: %s %s\n" % \
							self._dynamic_config._runtime_pkg_mask[
							existing_node]), noiselevel=-1)
				elif self._dynamic_config._allow_backtracking and \
					not self._accept_blocker_conflicts() and \
					not self.need_restart():

					self._add_slot_conflict(pkg)
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(pkg, (dep.parent, dep.atom))

						for parent_atom in arg_atoms:
							parent, atom = parent_atom
							self._add_parent_atom(pkg, parent_atom)
					self._process_slot_conflicts()

					# The ordering of backtrack_data can make
					# a difference here, because both mask actions may lead
					# to valid, but different, solutions and the one with
					# 'existing_node' masked is usually the better one. Because
					# of that, we choose an order such that
					# the backtracker will first explore the choice with
					# existing_node masked. The backtracker reverses the
					# order, so the order it uses is the reverse of the
					# order shown here. See bug #339606.
					for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
						# For missed update messages, find out which
						# atoms matched to_be_selected that did not
						# match to_be_masked.
							self._dynamic_config._parent_atoms.get(to_be_selected, set())
							conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
								parent_atoms = conflict_atoms

						all_parents.update(parent_atoms)

						# Check whether to_be_masked violates any parent atom.
						for parent, atom in parent_atoms:
							i = InternalPackageSet(initial_atoms=(atom,),
							if not i.findAtomForPackage(to_be_masked):

						fallback_data.append((to_be_masked, parent_atoms))

							# 'to_be_masked' does not violate any parent atom, which means
							# there is no point in masking it.
							backtrack_data.append((to_be_masked, parent_atoms))

					if not backtrack_data:
						# This shouldn't happen, but fall back to the old
						# behavior if this gets triggered somehow.
						backtrack_data = fallback_data

					if len(backtrack_data) > 1:
						# NOTE: Generally, we prefer to mask the higher
						# version since this solves common cases in which a
						# lower version is needed so that all dependencies
						# will be satisfied (bug #337178). However, if
						# existing_node happens to be installed then we
						# mask that since this is a common case that is
						# triggered when --update is not enabled.
						if existing_node.installed:
						elif pkg > existing_node:
							backtrack_data.reverse()

					to_be_masked = backtrack_data[-1][0]

					self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
					self._dynamic_config._need_restart = True
					if "--debug" in self._frozen_config.myopts:
						msg.append("backtracking due to slot conflict:")
						if backtrack_data is fallback_data:
							msg.append("!!! backtrack_data fallback")
						msg.append("   first package:  %s" % existing_node)
						msg.append("   second package: %s" % pkg)
						msg.append("  package to mask: %s" % to_be_masked)
						msg.append("      slot: %s" % pkg.slot_atom)
						msg.append("   parents: %s" % ", ".join( \
							"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
						writemsg_level("".join("%s\n" % l for l in msg),
							noiselevel=-1, level=logging.DEBUG)

					# A slot collision has occurred.  Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._add_slot_conflict(pkg)
					slot_collision = True

							"%s%s %s\n" % ("Slot Conflict:".ljust(15),
							existing_node, pkg_use_display(existing_node,
							self._frozen_config.myopts,
							modified_use=self._pkg_use_enabled(existing_node))),
							level=logging.DEBUG, noiselevel=-1)

				# Now add this node to the graph so that self.display()
				# can show use flags and --tree portage.output.  This node is
				# only being partially added to the graph.  It must not be
				# allowed to interfere with the other nodes that have been
				# added.  Do not overwrite data for existing nodes in
				# self._dynamic_config.mydbapi since that data will be used for blocker
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
			elif not previously_added:
				self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
				self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
				self._dynamic_config._highest_pkg_cache.clear()
				self._check_masks(pkg)

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
					# For consistency, also update the global virtuals.
					settings = self._frozen_config.roots[pkg.root].settings
					settings.setinst(pkg.cpv, pkg.metadata)
				except portage.exception.InvalidDependString as e:
					if not pkg.installed:
						# should have been masked before it was selected

			self._dynamic_config._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self._dynamic_config.digraph.add(pkg, parent, priority=priority)
				self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		    We want to go deeper on a few occasions:
		    Installing package A, we need to make sure package A's deps are met.
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		deep = self._dynamic_config.myparams.get("deep", 0)
		recurse = deep is True or depth + 1 <= deep
		dep_stack = self._dynamic_config._dep_stack
		if "recurse" not in self._dynamic_config.myparams:
		elif pkg.installed and not recurse:
			# Installed and beyond the recursion depth: park the package
			# on the ignored-deps stack instead of the main dep stack.
			dep_stack = self._dynamic_config._ignored_deps

		self._spinner_update()

		if not previously_added:
			dep_stack.append(pkg)
1339 def _check_masks(self, pkg):
1341 slot_key = (pkg.root, pkg.slot_atom)
1343 # Check for upgrades in the same slot that are
1344 # masked due to a LICENSE change in a newer
1345 # version that is not masked for any other reason.
1346 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1347 if other_pkg is not None and pkg < other_pkg:
1348 self._dynamic_config._masked_license_updates.add(other_pkg)
1350 def _add_parent_atom(self, pkg, parent_atom):
1351 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1352 if parent_atoms is None:
1353 parent_atoms = set()
1354 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1355 parent_atoms.add(parent_atom)
	def _add_slot_conflict(self, pkg):
		"""Register pkg as a slot-collision node and record the conflict
		under its (slot_atom, root) key in _slot_collision_info."""
		self._dynamic_config._slot_collision_nodes.add(pkg)
		slot_key = (pkg.slot_atom, pkg.root)
		slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
		if slot_nodes is None:
			# First conflict for this slot: seed the set with the package
			# currently occupying the slot in the graph.
			slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
			self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		"""
		Collect pkg's DEPEND/RDEPEND/PDEPEND strings, apply the
		--with-bdeps / --root-deps / removal-action policies, and queue
		each resulting dep string for resolution via
		_add_pkg_dep_string.
		"""
		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = self._pkg_use_enabled(pkg)
		depth = pkg.depth + 1
		removal_action = "remove" in self._dynamic_config.myparams

		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
			edepend[k] = metadata[k]

		# For --buildpkgonly without "deep", runtime deps of a freshly
		# built package are not needed.
		if not pkg.built and \
			"--buildpkgonly" in self._frozen_config.myopts and \
			"deep" not in self._dynamic_config.myparams:
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""

		ignore_build_time_deps = False
		if pkg.built and not removal_action:
			if self._dynamic_config.myparams.get("bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				ignore_build_time_deps = True

		if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
			# Removal actions never traverse ignored buildtime
			# dependencies, so it's safe to discard them early.
			edepend["DEPEND"] = ""
			ignore_build_time_deps = True

			# DEPEND is normally satisfied from the build root; --root-deps
			# redirects it to the target root.
			depend_root = myroot
			depend_root = self._frozen_config._running_root.root
			root_deps = self._frozen_config.myopts.get("--root-deps")
			if root_deps is not None:
				if root_deps is True:
					depend_root = myroot
				elif root_deps == "rdeps":
					ignore_build_time_deps = True

		# If rebuild mode is not enabled, it's safe to discard ignored
		# build-time dependencies. If you want these deps to be traversed
		# in "complete" mode then you need to specify --with-bdeps=y.
		if ignore_build_time_deps and \
			not self._rebuild.rebuild:
			edepend["DEPEND"] = ""

			# (root, dep string, priority) triples for each dep class.
			(depend_root, edepend["DEPEND"],
				self._priority(buildtime=True,
				optional=(pkg.built or ignore_build_time_deps),
				ignored=ignore_build_time_deps)),
			(myroot, edepend["RDEPEND"],
				self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"],
				self._priority(runtime_post=True))

		debug = "--debug" in self._frozen_config.myopts
		strict = mytype != "installed"

		for dep_root, dep_string, dep_priority in deps:
				writemsg_level("\nParent: %s\n" % (pkg,),
					noiselevel=-1, level=logging.DEBUG)
				writemsg_level("Depstring: %s\n" % (dep_string,),
					noiselevel=-1, level=logging.DEBUG)
				writemsg_level("Priority: %s\n" % (dep_priority,),
					noiselevel=-1, level=logging.DEBUG)

				dep_string = portage.dep.use_reduce(dep_string,
					uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
			except portage.exception.InvalidDependString as e:
				if not pkg.installed:
					# should have been masked before it was selected

				# Try again, but omit the is_valid_flag argument, since
				# invalid USE conditionals are a common problem and it's
				# practical to ignore this issue for installed packages.
					dep_string = portage.dep.use_reduce(dep_string,
						uselist=self._pkg_use_enabled(pkg))
				except portage.exception.InvalidDependString as e:
					self._dynamic_config._masked_installed.add(pkg)

				dep_string = list(self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, dep_string))
			except portage.exception.InvalidDependString as e:
				self._dynamic_config._masked_installed.add(pkg)
					# should have been masked before it was selected

			# Re-serialize the reduced structure for _add_pkg_dep_string.
			dep_string = portage.dep.paren_enclose(dep_string,
				unevaluated_atom=True)

			if not self._add_pkg_dep_string(
				pkg, dep_root, dep_priority, dep_string,

		self._dynamic_config._traversed_pkg_deps.add(pkg)
	def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
		# Wrapper around _wrapped_add_pkg_dep_string that temporarily
		# disables autounmask for optional/ignored deps and restores the
		# previous setting afterwards.
		_autounmask_backup = self._dynamic_config._autounmask
		if dep_priority.optional or dep_priority.ignored:
			# Temporarily disable autounmask for deps that
			# don't necessarily need to be satisfied.
			self._dynamic_config._autounmask = False
			return self._wrapped_add_pkg_dep_string(
				pkg, dep_root, dep_priority, dep_string,
			# Always restore the saved autounmask state.
			self._dynamic_config._autounmask = _autounmask_backup
	def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
		dep_string, allow_unsatisfied):
		"""
		Parse dep_string, choose packages for its atoms via
		_select_atoms/_minimize_children, and queue the resulting
		Dependency instances (including deps of indirectly selected
		virtuals) through self._add_dep.
		"""
		depth = pkg.depth + 1
		deep = self._dynamic_config.myparams.get("deep", 0)
		recurse_satisfied = deep is True or depth <= deep
		debug = "--debug" in self._frozen_config.myopts
		strict = pkg.type_name != "installed"

			writemsg_level("\nParent: %s\n" % (pkg,),
				noiselevel=-1, level=logging.DEBUG)
			writemsg_level("Depstring: %s\n" % (dep_string,),
				noiselevel=-1, level=logging.DEBUG)
			writemsg_level("Priority: %s\n" % (dep_priority,),
				noiselevel=-1, level=logging.DEBUG)

			selected_atoms = self._select_atoms(dep_root,
				dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
				strict=strict, priority=dep_priority)
		except portage.exception.InvalidDependString as e:
				self._dynamic_config._masked_installed.add(pkg)
				# should have been masked before it was selected

			writemsg_level("Candidates: %s\n" % \
				([str(x) for x in selected_atoms[pkg]],),
				noiselevel=-1, level=logging.DEBUG)

		root_config = self._frozen_config.roots[dep_root]
		vardb = root_config.trees["vartree"].dbapi
		traversed_virt_pkgs = set()

		reinstall_atoms = self._frozen_config.reinstall_atoms
		for atom, child in self._minimize_children(
			pkg, dep_priority, root_config, selected_atoms[pkg]):

			# If this was a specially generated virtual atom
			# from dep_check, map it back to the original, in
			# order to avoid distortion in places like display
			# or conflict resolution code.
			is_virt = hasattr(atom, '_orig_atom')
			atom = getattr(atom, '_orig_atom', atom)

			if atom.blocker and \
				(dep_priority.optional or dep_priority.ignored):
				# For --with-bdeps, ignore build-time only blockers
				# that originate from built packages.

			mypriority = dep_priority.copy()
			if not atom.blocker:
				# Installed packages matching the atom, excluding any that
				# are scheduled for reinstall.
				inst_pkgs = [inst_pkg for inst_pkg in
					reversed(vardb.match_pkgs(atom))
					if not reinstall_atoms.findAtomForPackage(inst_pkg,
						modified_use=self._pkg_use_enabled(inst_pkg))]
					for inst_pkg in inst_pkgs:
						if self._pkg_visibility_check(inst_pkg):
							mypriority.satisfied = inst_pkg
					if not mypriority.satisfied:
						# none visible, so use highest
						mypriority.satisfied = inst_pkgs[0]

			dep = Dependency(atom=atom,
				blocker=atom.blocker, child=child, depth=depth, parent=pkg,
				priority=mypriority, root=dep_root)

			# In some cases, dep_check will return deps that shouldn't
			# be proccessed any further, so they are identified and
			# discarded here. Try to discard as few as possible since
			# discarded dependencies reduce the amount of information
			# available for optimization of merge order.
			if not atom.blocker and \
				not recurse_satisfied and \
				mypriority.satisfied and \
				mypriority.satisfied.visible and \
				dep.child is not None and \
				not dep.child.installed and \
				self._dynamic_config._slot_pkg_map[dep.child.root].get(
				dep.child.slot_atom) is None:
				if dep.root == self._frozen_config.target_root:
						myarg = next(self._iter_atoms_for_pkg(dep.child))
					except StopIteration:
					except InvalidDependString:
						if not dep.child.installed:
							# This shouldn't happen since the package
							# should have been masked.

					# Existing child selection may not be valid unless
					# it's added to the graph immediately, since "complete"
					# mode may select a different child later.
					self._dynamic_config._ignored_deps.append(dep)

			if dep_priority.ignored and \
				not self._dynamic_config._traverse_ignored_deps:
				if is_virt and dep.child is not None:
					traversed_virt_pkgs.add(dep.child)
				self._dynamic_config._ignored_deps.append(dep)

			if not self._add_dep(dep,
				allow_unsatisfied=allow_unsatisfied):
			if is_virt and dep.child is not None:
				traversed_virt_pkgs.add(dep.child)

		selected_atoms.pop(pkg)

		# Add selected indirect virtual deps to the graph. This
		# takes advantage of circular dependency avoidance that's done
		# by dep_zapdeps. We preserve actual parent/child relationships
		# here in order to avoid distorting the dependency graph like
		# <=portage-2.1.6.x did.
		for virt_dep, atoms in selected_atoms.items():

			virt_pkg = virt_dep.child
			if virt_pkg not in traversed_virt_pkgs:

				writemsg_level("\nCandidates: %s: %s\n" % \
					(virt_pkg.cpv, [str(x) for x in atoms]),
					noiselevel=-1, level=logging.DEBUG)

			if not dep_priority.ignored or \
				self._dynamic_config._traverse_ignored_deps:

				inst_pkgs = [inst_pkg for inst_pkg in
					reversed(vardb.match_pkgs(virt_dep.atom))
					if not reinstall_atoms.findAtomForPackage(inst_pkg,
						modified_use=self._pkg_use_enabled(inst_pkg))]
					for inst_pkg in inst_pkgs:
						if self._pkg_visibility_check(inst_pkg):
							virt_dep.priority.satisfied = inst_pkg
					if not virt_dep.priority.satisfied:
						# none visible, so use highest
						virt_dep.priority.satisfied = inst_pkgs[0]

				if not self._add_pkg(virt_pkg, virt_dep):

			for atom, child in self._minimize_children(
				pkg, self._priority(runtime=True), root_config, atoms):

				# If this was a specially generated virtual atom
				# from dep_check, map it back to the original, in
				# order to avoid distortion in places like display
				# or conflict resolution code.
				is_virt = hasattr(atom, '_orig_atom')
				atom = getattr(atom, '_orig_atom', atom)

				# This is a GLEP 37 virtual, so its deps are all runtime.
				mypriority = self._priority(runtime=True)
				if not atom.blocker:
					inst_pkgs = [inst_pkg for inst_pkg in
						reversed(vardb.match_pkgs(atom))
						if not reinstall_atoms.findAtomForPackage(inst_pkg,
							modified_use=self._pkg_use_enabled(inst_pkg))]
						for inst_pkg in inst_pkgs:
							if self._pkg_visibility_check(inst_pkg):
								mypriority.satisfied = inst_pkg
						if not mypriority.satisfied:
							# none visible, so use highest
							mypriority.satisfied = inst_pkgs[0]

				# Dependencies of virtuals are considered to have the
				# same depth as the virtual itself.
				dep = Dependency(atom=atom,
					blocker=atom.blocker, child=child, depth=virt_dep.depth,
					parent=virt_pkg, priority=mypriority, root=dep_root,
					collapsed_parent=pkg, collapsed_priority=dep_priority)

				if not atom.blocker and \
					not recurse_satisfied and \
					mypriority.satisfied and \
					mypriority.satisfied.visible and \
					dep.child is not None and \
					not dep.child.installed and \
					self._dynamic_config._slot_pkg_map[dep.child.root].get(
					dep.child.slot_atom) is None:
					if dep.root == self._frozen_config.target_root:
							myarg = next(self._iter_atoms_for_pkg(dep.child))
						except StopIteration:
						except InvalidDependString:
							if not dep.child.installed:
						self._dynamic_config._ignored_deps.append(dep)

				if dep_priority.ignored and \
					not self._dynamic_config._traverse_ignored_deps:
					if is_virt and dep.child is not None:
						traversed_virt_pkgs.add(dep.child)
					self._dynamic_config._ignored_deps.append(dep)

				if not self._add_dep(dep,
					allow_unsatisfied=allow_unsatisfied):
				if is_virt and dep.child is not None:
					traversed_virt_pkgs.add(dep.child)

			writemsg_level("\nExiting... %s\n" % (pkg,),
				noiselevel=-1, level=logging.DEBUG)
	def _minimize_children(self, parent, priority, root_config, atoms):
		"""
		Selects packages to satisfy the given atoms, and minimizes the
		number of selected packages. This serves to identify and eliminate
		redundant package selections when multiple atoms happen to specify
		"""
			dep_pkg, existing_node = self._select_package(
				root_config.root, atom)
			atom_pkg_map[atom] = dep_pkg

		# With fewer than two selections there is nothing to minimize.
		if len(atom_pkg_map) < 2:
			for item in atom_pkg_map.items():

		for atom, pkg in atom_pkg_map.items():
			pkg_atom_map.setdefault(pkg, set()).add(atom)
			cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

		for cp, pkgs in cp_pkg_map.items():
					for atom in pkg_atom_map[pkg]:

			# Use a digraph to identify and eliminate any
			# redundant package selections.
			atom_pkg_graph = digraph()
				for atom in pkg_atom_map[pkg1]:
					atom_pkg_graph.add(pkg1, atom)
					atom_set = InternalPackageSet(initial_atoms=(atom,),
						# Also connect other packages of the same cp that
						# satisfy this atom.
						if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
							atom_pkg_graph.add(pkg2, atom)

				# A package is redundant when every atom it satisfies is
				# also satisfied by some other selected package.
				eliminate_pkg = True
				for atom in atom_pkg_graph.parent_nodes(pkg):
					if len(atom_pkg_graph.child_nodes(atom)) < 2:
						eliminate_pkg = False
					atom_pkg_graph.remove(pkg)

			# Yield ~, =*, < and <= atoms first, since those are more likely to
			# cause slot conflicts, and we want those atoms to be displayed
			# in the resulting slot conflict message (see bug #291142).
			for atom in cp_atoms:
				for child_pkg in atom_pkg_graph.child_nodes(atom):
					existing_node, matches = \
						self._check_slot_conflict(child_pkg, atom)
					if existing_node and not matches:
					conflict_atoms.append(atom)
					normal_atoms.append(atom)

			for atom in chain(conflict_atoms, normal_atoms):
				child_pkgs = atom_pkg_graph.child_nodes(atom)
				# if more than one child, yield highest version
				if len(child_pkgs) > 1:
				yield (atom, child_pkgs[-1])
	def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
		"""
		Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
		Yields non-disjunctive deps. Raises InvalidDependString when
		"""
		while i < len(dep_struct):
			if isinstance(x, list):
				# Recurse into nested groups.
				for y in self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, x):
				# "||" operator: defer the whole choice (operator plus the
				# group that follows it) for later processing.
				self._queue_disjunction(pkg, dep_root, dep_priority,
					[ x, dep_struct[ i + 1 ] ] )
					x = portage.dep.Atom(x)
				except portage.exception.InvalidAtom:
					if not pkg.installed:
						raise portage.exception.InvalidDependString(
							"invalid atom: '%s'" % x)
				# Note: Eventually this will check for PROPERTIES=virtual
				# or whatever other metadata gets implemented for this
				if x.cp.startswith('virtual/'):
					self._queue_disjunction( pkg, dep_root,
						dep_priority, [ str(x) ] )
1874 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1875 self._dynamic_config._dep_disjunctive_stack.append(
1876 (pkg, dep_root, dep_priority, dep_struct))
	def _pop_disjunction(self, allow_unsatisfied):
		"""
		Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
		populate self._dynamic_config._dep_stack.
		"""
		pkg, dep_root, dep_priority, dep_struct = \
			self._dynamic_config._dep_disjunctive_stack.pop()
		# Re-serialize the structure so _add_pkg_dep_string can parse it.
		dep_string = portage.dep.paren_enclose(dep_struct,
			unevaluated_atom=True)
		if not self._add_pkg_dep_string(
			pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
	def _priority(self, **kwargs):
		"""Construct a dep priority of the appropriate class: unmerge
		priorities for removal actions, DepPriority otherwise."""
		if "remove" in self._dynamic_config.myparams:
			priority_constructor = UnmergeDepPriority
			priority_constructor = DepPriority
		return priority_constructor(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@returns: a list of atoms containing categories (possibly empty)
		"""
		# Build a "null/pn" key just to split out the package name.
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		cat, atom_pn = portage.catsplit(null_cp)

		dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
		# Collect every category that contains a matching package name.
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):

		for cat in categories:
			deps.append(Atom(insert_category_into_atom(
				atom_without_category, cat), allow_repo=True))
	def _have_new_virt(self, root, atom_cp):
		# Scan all configured dbs for this root for a package matching
		# atom_cp; callers use the result as a boolean (see
		# _iter_atoms_for_pkg).
		for db, pkg_type, built, installed, db_keys in \
			self._dynamic_config._filtered_trees[root]["dbs"]:
			if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Yield (arg, atom) pairs for the dependency arguments whose atoms
		match pkg (callers such as _add_pkg unpack the pairs as
		(parent, atom)).
		"""
		depgraph_sets = self._dynamic_config.sets[pkg.root]
		atom_arg_map = depgraph_sets.atom_arg_map
		root_config = self._frozen_config.roots[pkg.root]
		for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
			# Skip atoms that refer to a new-style virtual rather than
			# to pkg's own cp.
			if atom.cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom.cp):
				self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom.cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					# A higher version in a different slot exists.
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
# NOTE(review): excerpt with elided lines throughout (embedded original line
# numbering is non-contiguous). Code kept byte-identical; comments added only.
1964 def select_files(self, myfiles):
1965 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1966 self._dynamic_config._initial_arg_list and call self._resolve to create the
1967 appropriate depgraph and return a favorite list."""
1969 debug = "--debug" in self._frozen_config.myopts
1970 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1971 sets = root_config.sets
1972 depgraph_sets = self._dynamic_config.sets[root_config.root]
1974 eroot = root_config.root
1975 root = root_config.settings['ROOT']
1976 dbs = self._dynamic_config._filtered_trees[eroot]["dbs"]
1977 vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
1978 real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
1979 portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
1980 bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
1981 pkgsettings = self._frozen_config.pkgsettings[eroot]
1983 onlydeps = "--onlydeps" in self._frozen_config.myopts
# --- per-argument dispatch: the enclosing loop header over `x` is elided ---
# Branch 1 (binary package file): resolve the tbz2 path relative to PKGDIR,
# verify it lives at the location bintree expects, and queue a PackageArg.
1986 ext = os.path.splitext(x)[1]
1988 if not os.path.exists(x):
1990 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1991 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1992 elif os.path.exists(
1993 os.path.join(pkgsettings["PKGDIR"], x)):
1994 x = os.path.join(pkgsettings["PKGDIR"], x)
1996 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
1997 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1998 return 0, myfavorites
# CATEGORY is read out of the tbz2's xpak metadata to form "cat/pkg-ver".
1999 mytbz2=portage.xpak.tbz2(x)
2000 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
2001 if os.path.realpath(x) != \
2002 os.path.realpath(bindb.bintree.getname(mykey)):
2003 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
2004 self._dynamic_config._skip_restart = True
2005 return 0, myfavorites
2007 pkg = self._pkg(mykey, "binary", root_config,
2009 args.append(PackageArg(arg=x, package=pkg,
2010 root_config=root_config))
# Branch 2 (.ebuild path): derive cat/pkg from the directory layout and
# require the ebuild to sit inside a configured tree (PORTDIR/overlay).
2011 elif ext==".ebuild":
2012 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2013 pkgdir = os.path.dirname(ebuild_path)
2014 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2015 cp = pkgdir[len(tree_root)+1:]
2016 e = portage.exception.PackageNotFound(
2017 ("%s is not in a valid portage tree " + \
2018 "hierarchy or does not exist") % x)
2019 if not portage.isvalidatom(cp):
2021 cat = portage.catsplit(cp)[0]
# [:-7] strips the ".ebuild" suffix from the filename.
2022 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2023 if not portage.isvalidatom("="+mykey):
2025 ebuild_path = portdb.findname(mykey)
2027 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2028 cp, os.path.basename(ebuild_path)):
2029 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
2030 self._dynamic_config._skip_restart = True
2031 return 0, myfavorites
# Warn (with countdown delay) when the requested ebuild is masked.
2032 if mykey not in portdb.xmatch(
2033 "match-visible", portage.cpv_getkey(mykey)):
2034 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
2035 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
2036 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
2037 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
2040 raise portage.exception.PackageNotFound(
2041 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2042 pkg = self._pkg(mykey, "ebuild", root_config,
2043 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
2044 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
2045 args.append(PackageArg(arg=x, package=pkg,
2046 root_config=root_config))
# Branch 3 (absolute / relative filesystem paths): defer to a single
# batched iter_owners() lookup below.
2047 elif x.startswith(os.path.sep):
2048 if not x.startswith(root):
2049 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2050 " $ROOT.\n") % x, noiselevel=-1)
2051 self._dynamic_config._skip_restart = True
2053 # Queue these up since it's most efficient to handle
2054 # multiple files in a single iter_owners() call.
2055 lookup_owners.append(x)
2056 elif x.startswith("." + os.sep) or \
2057 x.startswith(".." + os.sep):
2058 f = os.path.abspath(x)
2059 if not f.startswith(root):
2060 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
2061 " $ROOT.\n") % (f, x), noiselevel=-1)
2062 self._dynamic_config._skip_restart = True
2064 lookup_owners.append(f)
# Branch 4 (sets and plain atoms).
2066 if x in ("system", "world"):
2068 if x.startswith(SETPREFIX):
2069 s = x[len(SETPREFIX):]
2071 raise portage.exception.PackageSetNotFound(s)
2072 if s in depgraph_sets.sets:
2075 depgraph_sets.sets[s] = pset
2076 args.append(SetArg(arg=x, pset=pset,
2077 root_config=root_config))
2079 if not is_valid_package_atom(x, allow_repo=True):
2080 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2082 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2083 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2084 self._dynamic_config._skip_restart = True
2086 # Don't expand categories or old-style virtuals here unless
2087 # necessary. Expansion of old-style virtuals here causes at
2088 # least the following problems:
2089 # 1) It's more difficult to determine which set(s) an atom
2090 # came from, if any.
2091 # 2) It takes away freedom from the resolver to choose other
2092 # possible expansions when necessary.
2094 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2095 root_config=root_config))
# Category-less atom: expand, then disambiguate by preferring the one
# installed cp (ignoring virtual/ when exactly one non-virtual remains).
2097 expanded_atoms = self._dep_expand(root_config, x)
2098 installed_cp_set = set()
2099 for atom in expanded_atoms:
2100 if vardb.cp_list(atom.cp):
2101 installed_cp_set.add(atom.cp)
2103 if len(installed_cp_set) > 1:
2104 non_virtual_cps = set()
2105 for atom_cp in installed_cp_set:
2106 if not atom_cp.startswith("virtual/"):
2107 non_virtual_cps.add(atom_cp)
2108 if len(non_virtual_cps) == 1:
2109 installed_cp_set = non_virtual_cps
2111 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2112 installed_cp = next(iter(installed_cp_set))
2113 for atom in expanded_atoms:
2114 if atom.cp == installed_cp:
2116 for pkg in self._iter_match_pkgs_any(
2117 root_config, atom.without_use,
2119 if not pkg.installed:
2123 expanded_atoms = [atom]
2126 # If a non-virtual package and one or more virtual packages
2127 # are in expanded_atoms, use the non-virtual package.
2128 if len(expanded_atoms) > 1:
2129 number_of_virtuals = 0
2130 for expanded_atom in expanded_atoms:
2131 if expanded_atom.cp.startswith("virtual/"):
2132 number_of_virtuals += 1
2134 candidate = expanded_atom
2135 if len(expanded_atoms) - number_of_virtuals == 1:
2136 expanded_atoms = [ candidate ]
2138 if len(expanded_atoms) > 1:
2139 writemsg("\n\n", noiselevel=-1)
2140 ambiguous_package_name(x, expanded_atoms, root_config,
2141 self._frozen_config.spinner, self._frozen_config.myopts)
2142 self._dynamic_config._skip_restart = True
2143 return False, myfavorites
2145 atom = expanded_atoms[0]
# Fall back to a new-style virtual/ atom when PROVIDE maps carry the pn.
2147 null_atom = Atom(insert_category_into_atom(x, "null"),
2149 cat, atom_pn = portage.catsplit(null_atom.cp)
2150 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2152 # Allow the depgraph to choose which virtual.
2153 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2158 if atom.use and atom.use.conditional:
2160 ("\n\n!!! '%s' contains a conditional " + \
2161 "which is not allowed.\n") % (x,), noiselevel=-1)
2162 writemsg("!!! Please check ebuild(5) for full details.\n")
2163 self._dynamic_config._skip_restart = True
2166 args.append(AtomArg(arg=x, atom=atom,
2167 root_config=root_config))
# --- owner lookup for the filesystem paths queued above ---
2171 search_for_multiple = False
2172 if len(lookup_owners) > 1:
2173 search_for_multiple = True
2175 for x in lookup_owners:
2176 if not search_for_multiple and os.path.isdir(x):
2177 search_for_multiple = True
# len(root)-1 keeps the leading os.sep when stripping $ROOT.
2178 relative_paths.append(x[len(root)-1:])
2181 for pkg, relative_path in \
2182 real_vardb._owners.iter_owners(relative_paths):
2183 owners.add(pkg.mycpv)
2184 if not search_for_multiple:
2188 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2189 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2190 self._dynamic_config._skip_restart = True
2194 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2196 # portage now masks packages with missing slot, but it's
2197 # possible that one was installed by an older version
2198 atom = Atom(portage.cpv_getkey(cpv))
2200 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2201 args.append(AtomArg(arg=atom, atom=atom,
2202 root_config=root_config))
2204 if "--update" in self._frozen_config.myopts:
2205 # In some cases, the greedy slots behavior can pull in a slot that
2206 # the user would want to uninstall due to it being blocked by a
2207 # newer version in a different slot. Therefore, it's necessary to
2208 # detect and discard any that should be uninstalled. Each time
2209 # that arguments are updated, package selections are repeated in
2210 # order to ensure consistency with the current arguments:
2212 # 1) Initialize args
2213 # 2) Select packages and generate initial greedy atoms
2214 # 3) Update args with greedy atoms
2215 # 4) Select packages and generate greedy atoms again, while
2216 # accounting for any blockers between selected packages
2217 # 5) Update args with revised greedy atoms
2219 self._set_args(args)
2222 greedy_args.append(arg)
2223 if not isinstance(arg, AtomArg):
2225 for atom in self._greedy_slots(arg.root_config, arg.atom):
2227 AtomArg(arg=arg.arg, atom=atom,
2228 root_config=arg.root_config))
2230 self._set_args(greedy_args)
2233 # Revise greedy atoms, accounting for any blockers
2234 # between selected packages.
2235 revised_greedy_args = []
2237 revised_greedy_args.append(arg)
2238 if not isinstance(arg, AtomArg):
2240 for atom in self._greedy_slots(arg.root_config, arg.atom,
2241 blocker_lookahead=True):
2242 revised_greedy_args.append(
2243 AtomArg(arg=arg.arg, atom=atom,
2244 root_config=arg.root_config))
2245 args = revised_greedy_args
2246 del revised_greedy_args
2248 self._set_args(args)
# Collect favorites: atoms for atom/package args, set names for set args.
2250 myfavorites = set(myfavorites)
2252 if isinstance(arg, (AtomArg, PackageArg)):
2253 myfavorites.add(arg.atom)
2254 elif isinstance(arg, SetArg):
2255 myfavorites.add(arg.arg)
2256 myfavorites = list(myfavorites)
2259 portage.writemsg("\n", noiselevel=-1)
2260 # Order needs to be preserved since a feature of --nodeps
2261 # is to allow the user to force a specific merge order.
2262 self._dynamic_config._initial_arg_list = args[:]
2264 return self._resolve(myfavorites)
# NOTE(review): excerpt with elided lines (non-contiguous embedded numbering);
# code kept byte-identical. The docstring's closing quotes fall on an elided
# line; "theier" / "_creategraph" typos are in the original text.
2266 def _resolve(self, myfavorites):
2267 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2268 call self._creategraph to process theier deps and return
2270 debug = "--debug" in self._frozen_config.myopts
2271 onlydeps = "--onlydeps" in self._frozen_config.myopts
2272 myroot = self._frozen_config.target_root
2273 pkgsettings = self._frozen_config.pkgsettings[myroot]
2274 pprovideddict = pkgsettings.pprovideddict
2275 virtuals = pkgsettings.getvirtuals()
2276 args = self._dynamic_config._initial_arg_list[:]
# Fold rebuild/reinstall requests in as extra root atoms.
2277 for root, atom in chain(self._rebuild.rebuild_list,
2278 self._rebuild.reinstall_list):
2279 args.append(AtomArg(arg=atom, atom=atom,
2280 root_config=self._frozen_config.roots[root]))
2281 for arg in self._expand_set_args(args, add_to_digraph=True):
2282 for atom in arg.pset.getAtoms():
2283 self._spinner_update()
2284 dep = Dependency(atom=atom, onlydeps=onlydeps,
2285 root=myroot, parent=arg)
2287 pprovided = pprovideddict.get(atom.cp)
2288 if pprovided and portage.match_from_list(atom, pprovided):
2289 # A provided package has been specified on the command line.
2290 self._dynamic_config._pprovided_args.append((arg, atom))
2292 if isinstance(arg, PackageArg):
2293 if not self._add_pkg(arg.package, dep) or \
2294 not self._create_graph():
2295 if not self.need_restart():
2296 sys.stderr.write(("\n\n!!! Problem " + \
2297 "resolving dependencies for %s\n") % \
2299 return 0, myfavorites
2302 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2303 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2304 pkg, existing_node = self._select_package(
2305 myroot, atom, onlydeps=onlydeps)
# No package selected: re-check package.provided via old-style
# virtual expansion before reporting the dep as unsatisfied.
2307 pprovided_match = False
2308 for virt_choice in virtuals.get(atom.cp, []):
2309 expanded_atom = portage.dep.Atom(
2310 atom.replace(atom.cp, virt_choice.cp, 1))
2311 pprovided = pprovideddict.get(expanded_atom.cp)
2313 portage.match_from_list(expanded_atom, pprovided):
2314 # A provided package has been
2315 # specified on the command line.
2316 self._dynamic_config._pprovided_args.append((arg, atom))
2317 pprovided_match = True
2322 if not (isinstance(arg, SetArg) and \
2323 arg.name in ("selected", "system", "world")):
2324 self._dynamic_config._unsatisfied_deps_for_display.append(
2325 ((myroot, atom), {"myparent" : arg}))
2326 return 0, myfavorites
2328 self._dynamic_config._missing_args.append((arg, atom))
2330 if atom.cp != pkg.cp:
2331 # For old-style virtuals, we need to repeat the
2332 # package.provided check against the selected package.
2333 expanded_atom = atom.replace(atom.cp, pkg.cp)
2334 pprovided = pprovideddict.get(pkg.cp)
2336 portage.match_from_list(expanded_atom, pprovided):
2337 # A provided package has been
2338 # specified on the command line.
2339 self._dynamic_config._pprovided_args.append((arg, atom))
2341 if pkg.installed and \
2342 "selective" not in self._dynamic_config.myparams and \
2343 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2344 pkg, modified_use=self._pkg_use_enabled(pkg)):
2345 self._dynamic_config._unsatisfied_deps_for_display.append(
2346 ((myroot, atom), {"myparent" : arg}))
2347 # Previous behavior was to bail out in this case, but
2348 # since the dep is satisfied by the installed package,
2349 # it's more friendly to continue building the graph
2350 # and just show a warning message. Therefore, only bail
2351 # out here if the atom is not from either the system or
2353 if not (isinstance(arg, SetArg) and \
2354 arg.name in ("selected", "system", "world")):
2355 return 0, myfavorites
2357 # Add the selected package to the graph as soon as possible
2358 # so that later dep_check() calls can use it as feedback
2359 # for making more consistent atom selections.
2360 if not self._add_pkg(pkg, dep):
2361 if self.need_restart():
2363 elif isinstance(arg, SetArg):
2364 writemsg(("\n\n!!! Problem resolving " + \
2365 "dependencies for %s from %s\n") % \
2366 (atom, arg.arg), noiselevel=-1)
2368 writemsg(("\n\n!!! Problem resolving " + \
2369 "dependencies for %s\n") % \
2370 (atom,), noiselevel=-1)
2371 return 0, myfavorites
2373 except SystemExit as e:
2374 raise # Needed else can't exit
2375 except Exception as e:
2376 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2377 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2380 # Now that the root packages have been added to the graph,
2381 # process the dependencies.
2382 if not self._create_graph():
2383 return 0, myfavorites
2387 except self._unknown_internal_error:
2388 return False, myfavorites
# Autounmask accounting: succeeding only via keyword/mask/USE/license
# changes still counts as a failure for the caller.
2390 digraph_set = frozenset(self._dynamic_config.digraph)
2392 if digraph_set.intersection(
2393 self._dynamic_config._needed_unstable_keywords) or \
2394 digraph_set.intersection(
2395 self._dynamic_config._needed_p_mask_changes) or \
2396 digraph_set.intersection(
2397 self._dynamic_config._needed_use_config_changes) or \
2398 digraph_set.intersection(
2399 self._dynamic_config._needed_license_changes) :
2400 #We failed if the user needs to change the configuration
2401 self._dynamic_config._success_without_autounmask = True
2402 return False, myfavorites
# Rebuild triggers force a restart with the rebuild lists recorded
# in the backtrack parameters.
2406 if self._rebuild.trigger_rebuilds():
2407 backtrack_infos = self._dynamic_config._backtrack_infos
2408 config = backtrack_infos.setdefault("config", {})
2409 config["rebuild_list"] = self._rebuild.rebuild_list
2410 config["reinstall_list"] = self._rebuild.reinstall_list
2411 self._dynamic_config._need_restart = True
2412 return False, myfavorites
2414 # We're true here unless we are missing binaries.
2415 return (True, myfavorites)
# NOTE(review): excerpt with elided lines (e.g. the docstring delimiters and
# the refs-list initialization around 2452-2453 are missing from view).
# Code kept byte-identical.
2417 def _set_args(self, args):
2419 Create the "__non_set_args__" package set from atoms and packages given as
2420 arguments. This method can be called multiple times if necessary.
2421 The package selection cache is automatically invalidated, since
2422 arguments influence package selections.
# Reset per-root atom bookkeeping before repopulating it below.
2427 for root in self._dynamic_config.sets:
2428 depgraph_sets = self._dynamic_config.sets[root]
2429 depgraph_sets.sets.setdefault('__non_set_args__',
2430 InternalPackageSet(allow_repo=True)).clear()
2431 depgraph_sets.atoms.clear()
2432 depgraph_sets.atom_arg_map.clear()
2433 set_atoms[root] = []
2434 non_set_atoms[root] = []
2436 # We don't add set args to the digraph here since that
2437 # happens at a later stage and we don't want to make
2438 # any state changes here that aren't reversed by a
2439 # another call to this method.
2440 for arg in self._expand_set_args(args, add_to_digraph=False):
2441 atom_arg_map = self._dynamic_config.sets[
2442 arg.root_config.root].atom_arg_map
2443 if isinstance(arg, SetArg):
2444 atom_group = set_atoms[arg.root_config.root]
2446 atom_group = non_set_atoms[arg.root_config.root]
2448 for atom in arg.pset.getAtoms():
2449 atom_group.append(atom)
2450 atom_key = (atom, arg.root_config.root)
2451 refs = atom_arg_map.get(atom_key)
2454 atom_arg_map[atom_key] = refs
# Publish the collected atoms into each root's depgraph sets.
2458 for root in self._dynamic_config.sets:
2459 depgraph_sets = self._dynamic_config.sets[root]
2460 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2461 non_set_atoms.get(root, [])))
2462 depgraph_sets.sets['__non_set_args__'].update(
2463 non_set_atoms.get(root, []))
2465 # Invalidate the package selection cache, since
2466 # arguments influence package selections.
2467 self._dynamic_config._highest_pkg_cache.clear()
2468 for trees in self._dynamic_config._filtered_trees.values():
2469 trees["porttree"].dbapi._clear_cache()
# NOTE(review): excerpt with elided lines (docstring delimiters, several
# early returns and loop headers missing from view). Code kept byte-identical.
2471 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2473 Return a list of slot atoms corresponding to installed slots that
2474 differ from the slot of the highest visible match. When
2475 blocker_lookahead is True, slot atoms that would trigger a blocker
2476 conflict are automatically discarded, potentially allowing automatic
2477 uninstallation of older slots when appropriate.
2479 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2480 if highest_pkg is None:
2482 vardb = root_config.trees["vartree"].dbapi
# Collect installed SLOTs for the same cp as the highest visible match.
2484 for cpv in vardb.match(atom):
2485 # don't mix new virtuals with old virtuals
2486 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2487 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2489 slots.add(highest_pkg.metadata["SLOT"])
2493 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select a package; keep it only if it is a
# lower version of the same cp than the highest visible match.
2496 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2497 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2498 if pkg is not None and \
2499 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2500 greedy_pkgs.append(pkg)
2503 if not blocker_lookahead:
2504 return [pkg.slot_atom for pkg in greedy_pkgs]
# With blocker_lookahead: compute each candidate's blocker atoms from
# its DEPEND/PDEPEND/RDEPEND and drop mutually conflicting candidates.
2507 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2508 for pkg in greedy_pkgs + [highest_pkg]:
2509 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2511 selected_atoms = self._select_atoms(
2512 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2513 parent=pkg, strict=True)
2514 except portage.exception.InvalidDependString:
2517 for atoms in selected_atoms.values():
2518 blocker_atoms.extend(x for x in atoms if x.blocker)
2519 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2521 if highest_pkg not in blockers:
2524 # filter packages with invalid deps
2525 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2527 # filter packages that conflict with highest_pkg
2528 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2529 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2530 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2535 # If two packages conflict, discard the lower version.
2536 discard_pkgs = set()
2537 greedy_pkgs.sort(reverse=True)
2538 for i in range(len(greedy_pkgs) - 1):
2539 pkg1 = greedy_pkgs[i]
2540 if pkg1 in discard_pkgs:
2542 for j in range(i + 1, len(greedy_pkgs)):
2543 pkg2 = greedy_pkgs[j]
2544 if pkg2 in discard_pkgs:
2546 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2547 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
# List is sorted descending, so pkg2 is the lower version.
2549 discard_pkgs.add(pkg2)
2551 return [pkg.slot_atom for pkg in greedy_pkgs \
2552 if pkg not in discard_pkgs]
# NOTE(review): the docstring's triple-quote delimiters fall on elided lines
# (2555/2559 missing from this excerpt). Code kept byte-identical.
# Thin wrapper: forces trees to the graph trees so atom selection prefers
# packages already in the graph, then delegates.
2554 def _select_atoms_from_graph(self, *pargs, **kwargs):
2556 Prefer atoms matching packages that have already been
2557 added to the graph or those that are installed and have
2558 not been scheduled for replacement.
2560 kwargs["trees"] = self._dynamic_config._graph_trees
2561 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): excerpt with elided lines (try/finally framing around the
# dep_check call, loop headers, and several branches are missing from view).
# Code kept byte-identical.
2563 def _select_atoms_highest_available(self, root, depstring,
2564 myuse=None, parent=None, strict=True, trees=None, priority=None):
2565 """This will raise InvalidDependString if necessary. If trees is
2566 None then self._dynamic_config._filtered_trees is used."""
2568 pkgsettings = self._frozen_config.pkgsettings[root]
2570 trees = self._dynamic_config._filtered_trees
2571 mytrees = trees[root]
2572 atom_graph = digraph()
2574 # Temporarily disable autounmask so that || preferences
2575 # account for masking and USE settings.
2576 _autounmask_backup = self._dynamic_config._autounmask
2577 self._dynamic_config._autounmask = False
2578 # backup state for restoration, in case of recursive
2579 # calls to this method
2580 backup_state = mytrees.copy()
2582 # clear state from previous call, in case this
2583 # call is recursive (we have a backup, that we
2584 # will use to restore it later)
2585 mytrees.pop("pkg_use_enabled", None)
2586 mytrees.pop("parent", None)
2587 mytrees.pop("atom_graph", None)
2588 mytrees.pop("priority", None)
# Thread per-call context through the trees dict for dep_check.
2590 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2591 if parent is not None:
2592 mytrees["parent"] = parent
2593 mytrees["atom_graph"] = atom_graph
2594 if priority is not None:
2595 mytrees["priority"] = priority
2597 mycheck = portage.dep_check(depstring, None,
2598 pkgsettings, myuse=myuse,
2599 myroot=root, trees=trees)
# Restoration block (presumably a finally: clause — the opener is on
# an elided line): undo the autounmask toggle and the mytrees edits.
2602 self._dynamic_config._autounmask = _autounmask_backup
2603 mytrees.pop("pkg_use_enabled", None)
2604 mytrees.pop("parent", None)
2605 mytrees.pop("atom_graph", None)
2606 mytrees.pop("priority", None)
2607 mytrees.update(backup_state)
2609 raise portage.exception.InvalidDependString(mycheck[1])
2611 selected_atoms = mycheck[1]
2612 elif parent not in atom_graph:
2613 selected_atoms = {parent : mycheck[1]}
2615 # Recursively traversed virtual dependencies, and their
2616 # direct dependencies, are considered to have the same
2617 # depth as direct dependencies.
2618 if parent.depth is None:
2621 virt_depth = parent.depth + 1
2622 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2623 selected_atoms = OrderedDict()
# Iterative DFS over atom_graph, grouping chosen child atoms under a
# Dependency key per traversed (virtual) node.
2624 node_stack = [(parent, None, None)]
2625 traversed_nodes = set()
2627 node, node_parent, parent_atom = node_stack.pop()
2628 traversed_nodes.add(node)
2632 if node_parent is parent:
2633 if priority is None:
2634 node_priority = None
2636 node_priority = priority.copy()
2638 # virtuals only have runtime deps
2639 node_priority = self._priority(runtime=True)
2641 k = Dependency(atom=parent_atom,
2642 blocker=parent_atom.blocker, child=node,
2643 depth=virt_depth, parent=node_parent,
2644 priority=node_priority, root=node.root)
2647 selected_atoms[k] = child_atoms
2648 for atom_node in atom_graph.child_nodes(node):
2649 child_atom = atom_node[0]
2650 if id(child_atom) not in chosen_atom_ids:
2652 child_atoms.append(child_atom)
2653 for child_node in atom_graph.child_nodes(atom_node):
2654 if child_node in traversed_nodes:
2656 if not portage.match_from_list(
2657 child_atom, [child_node]):
2658 # Typically this means that the atom
2659 # specifies USE deps that are unsatisfied
2660 # by the selected package. The caller will
2661 # record this as an unsatisfied dependency
2664 node_stack.append((child_node, node, child_atom))
2666 return selected_atoms
# NOTE(review): excerpt with elided lines (yields/returns around 2670,
# 2673-2676, 2678-2680, 2690-2692, 2698-2699 missing from view). Code kept
# byte-identical. Visible logic: look up graph matches for `atom`; for a
# virtual/ package, select its RDEPEND atoms from the graph and skip atoms
# carrying _orig_atom (already-expanded virtuals).
2668 def _expand_virt_from_graph(self, root, atom):
2669 if not isinstance(atom, Atom):
2671 graphdb = self._dynamic_config.mydbapi[root]
2672 match = graphdb.match_pkgs(atom)
2677 if not pkg.cpv.startswith("virtual/"):
2681 rdepend = self._select_atoms_from_graph(
2682 pkg.root, pkg.metadata.get("RDEPEND", ""),
2683 myuse=self._pkg_use_enabled(pkg),
2684 parent=pkg, strict=False)
2685 except InvalidDependString as e:
2686 writemsg_level("!!! Invalid RDEPEND in " + \
2687 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2688 (pkg.root, pkg.cpv, e),
2689 noiselevel=-1, level=logging.ERROR)
2693 for atoms in rdepend.values():
2695 if hasattr(atom, "_orig_atom"):
2696 # Ignore virtual atoms since we're only
2697 # interested in expanding the real atoms.
# NOTE(review): excerpt with elided lines (docstring delimiters, try: opener,
# several returns, and the _select_package argument list around 2727 are
# missing from view). Code kept byte-identical.
2701 def _virt_deps_visible(self, pkg, ignore_use=False):
2703 Assumes pkg is a virtual package. Traverses virtual deps recursively
2704 and returns True if all deps are visible, False otherwise. This is
2705 useful for checking if it will be necessary to expand virtual slots,
2706 for cases like bug #382557.
2709 rdepend = self._select_atoms(
2710 pkg.root, pkg.metadata.get("RDEPEND", ""),
2711 myuse=self._pkg_use_enabled(pkg),
2712 parent=pkg, priority=self._priority(runtime=True))
2713 except InvalidDependString as e:
2714 if not pkg.installed:
2716 writemsg_level("!!! Invalid RDEPEND in " + \
2717 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2718 (pkg.root, pkg.cpv, e),
2719 noiselevel=-1, level=logging.ERROR)
# Select a package for each RDEPEND atom (USE deps dropped when
# ignore_use — the conditional applying ignore_use is elided) and
# run the visibility check on it.
2722 for atoms in rdepend.values():
2725 atom = atom.without_use
2726 pkg, existing = self._select_package(
2728 if pkg is None or not self._pkg_visibility_check(pkg):
# NOTE(review): excerpt with elided lines (docstring delimiters, dep_chain/
# usedep initializations, try: openers, several breaks/continues missing from
# view). Code kept byte-identical.
2733 def _get_dep_chain(self, start_node, target_atom=None,
2734 unsatisfied_dependency=False):
2736 Returns a list of (atom, node_type) pairs that represent a dep chain.
2737 If target_atom is None, the first package shown is pkg's parent.
2738 If target_atom is not None the first package shown is pkg.
2739 If unsatisfied_dependency is True, the first parent is select who's
2740 dependency is not satisfied by 'pkg'. This is need for USE changes.
2741 (Does not support target_atom.)
2743 traversed_nodes = set()
2747 all_parents = self._dynamic_config._parent_atoms
# When a target_atom is given, first emit start_node itself, annotated
# with the USE flags that affect whether target_atom applies.
2749 if target_atom is not None and isinstance(node, Package):
2750 affecting_use = set()
2751 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2753 affecting_use.update(extract_affecting_use(
2754 node.metadata[dep_str], target_atom,
2755 eapi=node.metadata["EAPI"]))
2756 except InvalidDependString:
2757 if not node.installed:
2759 affecting_use.difference_update(node.use.mask, node.use.force)
2760 pkg_name = _unicode_decode("%s") % (node.cpv,)
2763 for flag in affecting_use:
2764 if flag in self._pkg_use_enabled(node):
2767 usedep.append("-"+flag)
2768 pkg_name += "[%s]" % ",".join(usedep)
2770 dep_chain.append((pkg_name, node.type_name))
2773 # To build a dep chain for the given package we take
2774 # "random" parents form the digraph, except for the
2775 # first package, because we want a parent that forced
2776 # the corresponding change (i.e '>=foo-2', instead 'foo').
2778 traversed_nodes.add(start_node)
2780 start_node_parent_atoms = {}
2781 for ppkg, patom in all_parents.get(node, []):
2782 # Get a list of suitable atoms. For use deps
2783 # (aka unsatisfied_dependency is not None) we
2784 # need that the start_node doesn't match the atom.
2785 if not unsatisfied_dependency or \
2786 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
2787 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
2789 if start_node_parent_atoms:
2790 # If there are parents in all_parents then use one of them.
2791 # If not, then this package got pulled in by an Arg and
2792 # will be correctly handled by the code that handles later
2793 # packages in the dep chain.
2794 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
2797 for ppkg in start_node_parent_atoms[best_match]:
2799 if ppkg in self._dynamic_config._initial_arg_list:
2800 # Stop if reached the top level of the dep chain.
# Main walk: climb from node to a selected parent until an argument
# (root) node or a dead end is reached.
2803 while node is not None:
2804 traversed_nodes.add(node)
2806 if isinstance(node, DependencyArg):
2807 if self._dynamic_config.digraph.parent_nodes(node):
2810 node_type = "argument"
2811 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
2813 elif node is not start_node:
2814 for ppkg, patom in all_parents[child]:
2816 atom = patom.unevaluated_atom
# Gather the dep strings whose priorities connect node->child,
# so affecting USE flags can be extracted from them.
2820 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
2821 if priority.buildtime:
2822 dep_strings.add(node.metadata["DEPEND"])
2823 if priority.runtime:
2824 dep_strings.add(node.metadata["RDEPEND"])
2825 if priority.runtime_post:
2826 dep_strings.add(node.metadata["PDEPEND"])
2828 affecting_use = set()
2829 for dep_str in dep_strings:
2831 affecting_use.update(extract_affecting_use(
2832 dep_str, atom, eapi=node.metadata["EAPI"]))
2833 except InvalidDependString:
2834 if not node.installed:
2837 #Don't show flags as 'affecting' if the user can't change them,
2838 affecting_use.difference_update(node.use.mask, \
2841 pkg_name = _unicode_decode("%s") % (node.cpv,)
2844 for flag in affecting_use:
2845 if flag in self._pkg_use_enabled(node):
2848 usedep.append("-"+flag)
2849 pkg_name += "[%s]" % ",".join(usedep)
2851 dep_chain.append((pkg_name, node.type_name))
2853 if node not in self._dynamic_config.digraph:
2854 # The parent is not in the graph due to backtracking.
2857 # When traversing to parents, prefer arguments over packages
2858 # since arguments are root nodes. Never traverse the same
2859 # package twice, in order to prevent an infinite loop.
2861 selected_parent = None
2864 parent_unsatisfied = None
2866 for parent in self._dynamic_config.digraph.parent_nodes(node):
2867 if parent in traversed_nodes:
2869 if isinstance(parent, DependencyArg):
2872 if isinstance(parent, Package) and \
2873 parent.operation == "merge":
2874 parent_merge = parent
2875 if unsatisfied_dependency and node is start_node:
2876 # Make sure that pkg doesn't satisfy parent's dependency.
2877 # This ensures that we select the correct parent for use
2879 for ppkg, atom in all_parents[start_node]:
2881 atom_set = InternalPackageSet(initial_atoms=(atom,))
2882 if not atom_set.findAtomForPackage(start_node):
2883 parent_unsatisfied = parent
2886 selected_parent = parent
# Preference order: unsatisfied parent > merge-list parent > arg.
2888 if parent_unsatisfied is not None:
2889 selected_parent = parent_unsatisfied
2890 elif parent_merge is not None:
2891 # Prefer parent in the merge list (bug #354747).
2892 selected_parent = parent_merge
2893 elif parent_arg is not None:
2894 if self._dynamic_config.digraph.parent_nodes(parent_arg):
2895 selected_parent = parent_arg
2898 (_unicode_decode("%s") % (parent_arg,), "argument"))
2899 selected_parent = None
2901 node = selected_parent
# NOTE(review): excerpt with elided lines (display_list initialization at
# 2906, the else: at 2910, and the trailing lines after 2913 are missing
# from view). Code kept byte-identical.
# Render _get_dep_chain(pkg) as a single "#"-prefixed comment line listing
# each requiring node, flagging argument nodes explicitly.
2904 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
2905 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2907 for node, node_type in dep_chain:
2908 if node_type == "argument":
2909 display_list.append("required by %s (argument)" % node)
2911 display_list.append("required by %s" % node)
2913 msg = "#" + ", ".join(display_list) + "\n"
# Report (or, in the check-only modes, merely detect) why the given atom
# could not be satisfied for the given root: backtracking masks, regular
# masks, USE / REQUIRED_USE mismatches, or a misspelled package name.
#
# NOTE(review): this extract has lost its indentation and a number of
# source lines (there are gaps in the embedded line numbering — e.g. the
# 'missing_use' list initialization, several try/except/else/continue
# lines, and noiselevel arguments are absent).  Restore this block from
# upstream before changing any logic; the comments below only annotate
# what is visible here.
2917 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2918 check_backtrack=False, check_autounmask_breakage=False):
2920 When check_backtrack=True, no output is produced and
2921 the method either returns or raises _backtrack_mask if
2922 a matching package has been masked by backtracking.
# Phase 1: scan every configured db (ebuild/binary/installed) for
# candidate packages matching the atom and collect their mask reasons.
2924 backtrack_mask = False
2925 autounmask_broke_use_dep = False
2926 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
2928 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
2930 xinfo = '"%s"' % atom.unevaluated_atom
2933 if isinstance(myparent, AtomArg):
2934 xinfo = _unicode_decode('"%s"') % (myparent,)
2935 # Discard null/ from failed cpv_expand category expansion.
2936 xinfo = xinfo.replace("null/", "")
2938 xinfo = "%s for %s" % (xinfo, root)
2939 masked_packages = []
2941 missing_use_adjustable = set()
2942 required_use_unsatisfied = []
2943 masked_pkg_instances = set()
2944 missing_licenses = []
2945 have_eapi_mask = False
2946 pkgsettings = self._frozen_config.pkgsettings[root]
2947 root_config = self._frozen_config.roots[root]
2948 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2949 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2950 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2951 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2952 for db, pkg_type, built, installed, db_keys in dbs:
2956 if hasattr(db, "xmatch"):
2957 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2959 cpv_list = db.match(atom.without_use)
2961 if atom.repo is None and hasattr(db, "getRepositories"):
2962 repo_list = db.getRepositories()
2964 repo_list = [atom.repo]
2968 for cpv in cpv_list:
2969 for repo in repo_list:
2970 if not db.cpv_exists(cpv, myrepo=repo):
2973 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2974 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2975 if metadata is not None and \
2976 portage.eapi_is_supported(metadata["EAPI"]):
2978 repo = metadata.get('repository')
2979 pkg = self._pkg(cpv, pkg_type, root_config,
2980 installed=installed, myrepo=repo)
2981 if not atom_set.findAtomForPackage(pkg,
2982 modified_use=self._pkg_use_enabled(pkg)):
2984 # pkg.metadata contains calculated USE for ebuilds,
2985 # required later for getMissingLicenses.
2986 metadata = pkg.metadata
2987 if pkg in self._dynamic_config._runtime_pkg_mask:
2988 backtrack_reasons = \
2989 self._dynamic_config._runtime_pkg_mask[pkg]
2990 mreasons.append('backtracking: %s' % \
2991 ', '.join(sorted(backtrack_reasons)))
2992 backtrack_mask = True
2993 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2994 modified_use=self._pkg_use_enabled(pkg)):
2995 mreasons = ["exclude option"]
2997 masked_pkg_instances.add(pkg)
2998 if atom.unevaluated_atom.use:
3000 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3001 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3002 missing_use.append(pkg)
3003 if atom_set_with_use.findAtomForPackage(pkg):
3004 autounmask_broke_use_dep = True
3008 writemsg("violated_conditionals raised " + \
3009 "InvalidAtom: '%s' parent: %s" % \
3010 (atom, myparent), noiselevel=-1)
3012 if not mreasons and \
3014 pkg.metadata["REQUIRED_USE"] and \
3015 eapi_has_required_use(pkg.metadata["EAPI"]):
3016 if not check_required_use(
3017 pkg.metadata["REQUIRED_USE"],
3018 self._pkg_use_enabled(pkg),
3019 pkg.iuse.is_valid_flag):
3020 required_use_unsatisfied.append(pkg)
3022 root_slot = (pkg.root, pkg.slot_atom)
3023 if pkg.built and root_slot in self._rebuild.rebuild_list:
3024 mreasons = ["need to rebuild from source"]
3025 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3026 mreasons = ["need to rebuild from source"]
3027 elif pkg.built and not mreasons:
3028 mreasons = ["use flag configuration mismatch"]
3029 masked_packages.append(
3030 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
# Check-only modes: signal the caller via exceptions instead of output.
3034 raise self._backtrack_mask()
3038 if check_autounmask_breakage:
3039 if autounmask_broke_use_dep:
3040 raise self._autounmask_breakage()
# Phase 2: translate USE-dep mismatches into human-readable
# "Change USE: ..." / "Missing IUSE: ..." suggestions.
3044 missing_use_reasons = []
3045 missing_iuse_reasons = []
3046 for pkg in missing_use:
3047 use = self._pkg_use_enabled(pkg)
3049 #Use the unevaluated atom here, because some flags might have gone
3050 #lost during evaluation.
3051 required_flags = atom.unevaluated_atom.use.required
3052 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3056 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3057 missing_iuse_reasons.append((pkg, mreasons))
3059 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3060 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3062 untouchable_flags = \
3063 frozenset(chain(pkg.use.mask, pkg.use.force))
3064 if untouchable_flags.intersection(
3065 chain(need_enable, need_disable)):
3068 missing_use_adjustable.add(pkg)
3069 required_use = pkg.metadata["REQUIRED_USE"]
3070 required_use_warning = ""
3072 old_use = self._pkg_use_enabled(pkg)
3073 new_use = set(self._pkg_use_enabled(pkg))
3074 for flag in need_enable:
3076 for flag in need_disable:
3077 new_use.discard(flag)
3078 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3079 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
3080 required_use_warning = ", this change violates use flag constraints " + \
3081 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3083 if need_enable or need_disable:
3085 changes.extend(colorize("red", "+" + x) \
3086 for x in need_enable)
3087 changes.extend(colorize("blue", "-" + x) \
3088 for x in need_disable)
3089 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3090 missing_use_reasons.append((pkg, mreasons))
3092 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3093 # Lets see if the violated use deps are conditional.
3094 # If so, suggest to change them on the parent.
3096 # If the child package is masked then a change to
3097 # parent USE is not a valid solution (a normal mask
3098 # message should be displayed instead).
3099 if pkg in masked_pkg_instances:
3103 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3104 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3105 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3106 #all violated use deps are conditional
3108 conditional = violated_atom.use.conditional
3109 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3110 conditional.enabled, conditional.disabled))
3112 untouchable_flags = \
3113 frozenset(chain(myparent.use.mask, myparent.use.force))
3114 if untouchable_flags.intersection(involved_flags):
3117 required_use = myparent.metadata["REQUIRED_USE"]
3118 required_use_warning = ""
3120 old_use = self._pkg_use_enabled(myparent)
3121 new_use = set(self._pkg_use_enabled(myparent))
3122 for flag in involved_flags:
3124 new_use.discard(flag)
3127 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
3128 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
3129 required_use_warning = ", this change violates use flag constraints " + \
3130 "defined by %s: '%s'" % (myparent.cpv, \
3131 human_readable_required_use(required_use))
3133 for flag in involved_flags:
3134 if flag in self._pkg_use_enabled(myparent):
3135 changes.append(colorize("blue", "-" + flag))
3137 changes.append(colorize("red", "+" + flag))
3138 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3139 if (myparent, mreasons) not in missing_use_reasons:
3140 missing_use_reasons.append((myparent, mreasons))
# Masked instances are excluded from USE-change suggestions; a normal
# mask message is more appropriate for them.
3142 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3143 in missing_use_reasons if pkg not in masked_pkg_instances]
3145 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3146 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3148 show_missing_use = False
3149 if unmasked_use_reasons:
3150 # Only show the latest version.
3151 show_missing_use = []
3153 parent_reason = None
3154 for pkg, mreasons in unmasked_use_reasons:
3156 if parent_reason is None:
3157 #This happens if a use change on the parent
3158 #leads to a satisfied conditional use dep.
3159 parent_reason = (pkg, mreasons)
3160 elif pkg_reason is None:
3161 #Don't rely on the first pkg in unmasked_use_reasons,
3162 #being the highest version of the dependency.
3163 pkg_reason = (pkg, mreasons)
3165 show_missing_use.append(pkg_reason)
3167 show_missing_use.append(parent_reason)
3169 elif unmasked_iuse_reasons:
3170 masked_with_iuse = False
3171 for pkg in masked_pkg_instances:
3172 #Use atom.unevaluated here, because some flags might have gone
3173 #lost during evaluation.
3174 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3175 # Package(s) with required IUSE are masked,
3176 # so display a normal masking message.
3177 masked_with_iuse = True
3179 if not masked_with_iuse:
3180 show_missing_use = unmasked_iuse_reasons
3182 if required_use_unsatisfied:
3183 # If there's a higher unmasked version in missing_use_adjustable
3184 # then we want to show that instead.
3185 for pkg in missing_use_adjustable:
3186 if pkg not in masked_pkg_instances and \
3187 pkg > required_use_unsatisfied[0]:
3188 required_use_unsatisfied = False
# Phase 3: emit the report.  REQUIRED_USE problems take priority over
# USE-change suggestions, which take priority over plain mask output.
3193 if required_use_unsatisfied:
3194 # We have an unmasked package that only requires USE adjustment
3195 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3196 # that the user wants the latest version, so only the first
3197 # instance is displayed.
3198 pkg = required_use_unsatisfied[0]
3199 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3200 writemsg_stdout("\n!!! " + \
3201 colorize("BAD", "The ebuild selected to satisfy ") + \
3202 colorize("INFORM", xinfo) + \
3203 colorize("BAD", " has unmet requirements.") + "\n",
3205 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3206 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
3208 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
3209 "are unsatisfied:\n", noiselevel=-1)
3210 reduced_noise = check_required_use(
3211 pkg.metadata["REQUIRED_USE"],
3212 self._pkg_use_enabled(pkg),
3213 pkg.iuse.is_valid_flag).tounicode()
3214 writemsg_stdout(" %s\n" % \
3215 human_readable_required_use(reduced_noise),
3217 normalized_required_use = \
3218 " ".join(pkg.metadata["REQUIRED_USE"].split())
3219 if reduced_noise != normalized_required_use:
3220 writemsg_stdout("\n The above constraints " + \
3221 "are a subset of the following complete expression:\n",
3223 writemsg_stdout(" %s\n" % \
3224 human_readable_required_use(normalized_required_use),
3226 writemsg_stdout("\n", noiselevel=-1)
3228 elif show_missing_use:
3229 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3230 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3231 for pkg, mreasons in show_missing_use:
3232 writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3234 elif masked_packages:
3235 writemsg_stdout("\n!!! " + \
3236 colorize("BAD", "All ebuilds that could satisfy ") + \
3237 colorize("INFORM", xinfo) + \
3238 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3239 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3240 have_eapi_mask = show_masked_packages(masked_packages)
3242 writemsg_stdout("\n", noiselevel=-1)
3243 msg = ("The current version of portage supports " + \
3244 "EAPI '%s'. You must upgrade to a newer version" + \
3245 " of portage before EAPI masked packages can" + \
3246 " be installed.") % portage.const.EAPI
3247 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3248 writemsg_stdout("\n", noiselevel=-1)
3252 if not atom.cp.startswith("null/"):
3253 for pkg in self._iter_match_pkgs_any(
3254 root_config, Atom(atom.cp)):
3258 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
# Offer "maybe you meant ..." suggestions for a misspelled command-line
# atom, using difflib close-match search over all known cp values.
3259 if isinstance(myparent, AtomArg) and \
3261 self._frozen_config.myopts.get(
3262 "--misspell-suggestions", "y") != "n":
3263 cp = myparent.atom.cp.lower()
3264 cat, pkg = portage.catsplit(cp)
3268 writemsg_stdout("\nemerge: searching for similar names..."
3272 all_cp.update(vardb.cp_all())
3273 all_cp.update(portdb.cp_all())
3274 if "--usepkg" in self._frozen_config.myopts:
3275 all_cp.update(bindb.cp_all())
3276 # discard dir containing no ebuilds
3280 for cp_orig in all_cp:
3281 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3282 all_cp = set(orig_cp_map)
3285 matches = difflib.get_close_matches(cp, all_cp)
3288 for other_cp in list(all_cp):
3289 other_pkg = portage.catsplit(other_cp)[1]
3290 if other_pkg == pkg:
3291 # discard dir containing no ebuilds
3292 all_cp.discard(other_cp)
3294 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3295 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3297 for pkg_match in pkg_matches:
3298 matches.extend(pkg_to_cp[pkg_match])
3300 matches_orig_case = []
3302 matches_orig_case.extend(orig_cp_map[cp])
3303 matches = matches_orig_case
3305 if len(matches) == 1:
3306 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
3308 elif len(matches) > 1:
3310 "\nemerge: Maybe you meant any of these: %s?\n" % \
3311 (", ".join(matches),), noiselevel=-1)
3313 # Generally, this would only happen if
3314 # all dbapis are empty.
3315 writemsg_stdout(" nothing similar found.\n"
# Finally append the dependency chain so the user can see which parent
# (package or argument) pulled the unsatisfiable atom in.
3318 if not isinstance(myparent, AtomArg):
3319 # It's redundant to show parent for AtomArg since
3320 # it's the same as 'xinfo' displayed above.
3321 dep_chain = self._get_dep_chain(myparent, atom)
3322 for node, node_type in dep_chain:
3323 msg.append('(dependency required by "%s" [%s])' % \
3324 (colorize('INFORM', _unicode_decode("%s") % \
3325 (node)), node_type))
3328 writemsg_stdout("\n".join(msg), noiselevel=-1)
3329 writemsg_stdout("\n", noiselevel=-1)
3333 writemsg_stdout("\n", noiselevel=-1)
3335 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
3336 for db, pkg_type, built, installed, db_keys in \
3337 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3338 for pkg in self._iter_match_pkgs(root_config,
3339 pkg_type, atom, onlydeps=onlydeps):
# Generator over Package instances of pkg_type matching atom, without
# visibility or (for unbuilt ebuilds) USE checks.
# NOTE(review): this extract has lost indentation and several lines
# (gaps in the embedded numbering drop 'else:'/'try:'/'continue' lines
# and the final 'yield pkg').  Restore from upstream before editing;
# comments below only annotate the visible code.
3342 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
3344 Iterate over Package instances of pkg_type matching the given atom.
3345 This does not check visibility and it also does not match USE for
3346 unbuilt ebuilds since USE are lazily calculated after visibility
3347 checks (to avoid the expense when possible).
3350 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3352 if hasattr(db, "xmatch"):
3353 # For portdbapi we match only against the cpv, in order
3354 # to bypass unnecessary cache access for things like IUSE
3355 # and SLOT. Later, we cache the metadata in a Package
3356 # instance, and use that for further matching. This
3357 # optimization is especially relevant since
3358 # pordbapi.aux_get() does not cache calls that have
3359 # myrepo or mytree arguments.
3360 cpv_list = db.xmatch("match-all-cpv-only", atom)
3362 cpv_list = db.match(atom)
3364 # USE=multislot can make an installed package appear as if
3365 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3366 # won't do any good as long as USE=multislot is enabled since
3367 # the newly built package still won't have the expected slot.
3368 # Therefore, assume that such SLOT dependencies are already
3369 # satisfied rather than forcing a rebuild.
3370 installed = pkg_type == 'installed'
3371 if installed and not cpv_list and atom.slot:
3373 if "remove" in self._dynamic_config.myparams:
3374 # We need to search the portdbapi, which is not in our
3375 # normal dbs list, in order to find the real SLOT.
3376 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
3377 db_keys = list(portdb._aux_cache_keys)
3378 dbs = [(portdb, "ebuild", False, False, db_keys)]
3380 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
# Check whether any other db still provides the requested SLOT; if
# none does, fall back to matching the installed package slotlessly.
3382 for cpv in db.match(atom.cp):
3383 slot_available = False
3384 for other_db, other_type, other_built, \
3385 other_installed, other_keys in dbs:
3388 other_db.aux_get(cpv, ["SLOT"])[0]:
3389 slot_available = True
3393 if not slot_available:
3395 inst_pkg = self._pkg(cpv, "installed",
3396 root_config, installed=installed, myrepo = atom.repo)
3397 # Remove the slot from the atom and verify that
3398 # the package matches the resulting atom.
3399 if portage.match_from_list(
3400 atom.without_slot, [inst_pkg]):
3405 atom_set = InternalPackageSet(initial_atoms=(atom,),
3407 if atom.repo is None and hasattr(db, "getRepositories"):
3408 repo_list = db.getRepositories()
3410 repo_list = [atom.repo]
3414 for cpv in cpv_list:
3415 for repo in repo_list:
3418 pkg = self._pkg(cpv, pkg_type, root_config,
3419 installed=installed, onlydeps=onlydeps, myrepo=repo)
3420 except portage.exception.PackageNotFound:
3423 # A cpv can be returned from dbapi.match() as an
3424 # old-style virtual match even in cases when the
3425 # package does not actually PROVIDE the virtual.
3426 # Filter out any such false matches here.
3428 # Make sure that cpv from the current repo satisfies the atom.
3429 # This might not be the case if there are several repos with
3430 # the same cpv, but different metadata keys, like SLOT.
3431 # Also, for portdbapi, parts of the match that require
3432 # metadata access are deferred until we have cached the
3433 # metadata in a Package instance.
3434 if not atom_set.findAtomForPackage(pkg,
3435 modified_use=self._pkg_use_enabled(pkg)):
3439 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3440 cache_key = (root, atom, onlydeps)
3441 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
3444 if pkg and not existing:
3445 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3446 if existing and existing == pkg:
3447 # Update the cache to reflect that the
3448 # package has been added to the graph.
3450 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3452 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3453 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3456 settings = pkg.root_config.settings
3457 if self._pkg_visibility_check(pkg) and \
3458 not (pkg.installed and pkg.masks):
3459 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
3462 def _want_installed_pkg(self, pkg):
3464 Given an installed package returned from select_pkg, return
3465 True if the user has not explicitly requested for this package
3466 to be replaced (typically via an atom on the command line).
3468 if "selective" not in self._dynamic_config.myparams and \
3469 pkg.root == self._frozen_config.target_root:
3470 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
3471 modified_use=self._pkg_use_enabled(pkg)):
3474 next(self._iter_atoms_for_pkg(pkg))
3475 except StopIteration:
3477 except portage.exception.InvalidDependString:
3483 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3484 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3486 default_selection = (pkg, existing)
3488 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
3490 if self._dynamic_config._autounmask is True:
3491 if pkg is not None and \
3493 not self._want_installed_pkg(pkg):
3496 for only_use_changes in True, False:
3500 for allow_unmasks in (False, True):
3501 if allow_unmasks and (only_use_changes or autounmask_keep_masks):
3508 self._wrapped_select_pkg_highest_available_imp(
3509 root, atom, onlydeps=onlydeps,
3510 allow_use_changes=True,
3511 allow_unstable_keywords=(not only_use_changes),
3512 allow_license_changes=(not only_use_changes),
3513 allow_unmasks=allow_unmasks)
3515 if pkg is not None and \
3517 not self._want_installed_pkg(pkg):
3520 if self._dynamic_config._need_restart:
3524 # This ensures that we can fall back to an installed package
3525 # that may have been rejected in the autounmask path above.
3526 return default_selection
3528 return pkg, existing
# Decide whether pkg may be used, and when autounmask is active, record
# the keyword/mask/license changes that would make it usable (also
# mirrored into _backtrack_infos for the backtracker).
# NOTE(review): this extract has lost indentation and the explicit
# 'return True'/'return False' lines (gaps in the embedded numbering).
# Restore from upstream before editing; comments below only annotate
# the visible code.
3530 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False,
3531 allow_license_changes=False, allow_unmasks=False, trust_graph=True):
3536 if trust_graph and pkg in self._dynamic_config.digraph:
3537 # Sometimes we need to temporarily disable
3538 # dynamic_config._autounmask, but for overall
3539 # consistency in dependency resolution, in most
3540 # cases we want to treat packages in the graph
3541 # as though they are visible.
3544 if not self._dynamic_config._autounmask:
# Classify each masking reason by the kind of change that could lift it.
3547 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3548 root_config = self._frozen_config.roots[pkg.root]
3549 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
3551 masked_by_unstable_keywords = False
3552 masked_by_missing_keywords = False
3553 missing_licenses = None
3554 masked_by_something_else = False
3555 masked_by_p_mask = False
3557 for reason in mreasons:
3558 hint = reason.unmask_hint
3561 masked_by_something_else = True
3562 elif hint.key == "unstable keyword":
3563 masked_by_unstable_keywords = True
3564 if hint.value == "**":
3565 masked_by_missing_keywords = True
3566 elif hint.key == "p_mask":
3567 masked_by_p_mask = True
3568 elif hint.key == "license":
3569 missing_licenses = hint.value
3571 masked_by_something_else = True
3573 if masked_by_something_else:
# Drop masks that previously scheduled autounmask changes already cover.
3576 if pkg in self._dynamic_config._needed_unstable_keywords:
3577 #If the package is already keyworded, remove the mask.
3578 masked_by_unstable_keywords = False
3579 masked_by_missing_keywords = False
3581 if pkg in self._dynamic_config._needed_p_mask_changes:
3582 #If the package is already keyworded, remove the mask.
3583 masked_by_p_mask = False
3585 if missing_licenses:
3586 #If the needed licenses are already unmasked, remove the mask.
3587 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
3589 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
3590 #Package has already been unmasked.
3593 #We treat missing keywords in the same way as masks.
3594 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
3595 (masked_by_missing_keywords and not allow_unmasks) or \
3596 (masked_by_p_mask and not allow_unmasks) or \
3597 (missing_licenses and not allow_license_changes):
3598 #We are not allowed to do the needed changes.
# Record the needed changes, both in dynamic config and in
# _backtrack_infos so a backtracking run can replay them.
3601 if masked_by_unstable_keywords:
3602 self._dynamic_config._needed_unstable_keywords.add(pkg)
3603 backtrack_infos = self._dynamic_config._backtrack_infos
3604 backtrack_infos.setdefault("config", {})
3605 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
3606 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
3608 if masked_by_p_mask:
3609 self._dynamic_config._needed_p_mask_changes.add(pkg)
3610 backtrack_infos = self._dynamic_config._backtrack_infos
3611 backtrack_infos.setdefault("config", {})
3612 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
3613 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
3615 if missing_licenses:
3616 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
3617 backtrack_infos = self._dynamic_config._backtrack_infos
3618 backtrack_infos.setdefault("config", {})
3619 backtrack_infos["config"].setdefault("needed_license_changes", set())
3620 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
# Compute the effective USE for pkg, optionally scheduling autounmask
# USE-configuration changes needed to reach target_use.
# NOTE(review): this extract has lost indentation and several lines
# (gaps in the embedded numbering drop early returns, 'continue' lines
# and the 'new_use = set()' style initializers).  Restore from upstream
# before editing; comments below only annotate the visible code.
3624 def _pkg_use_enabled(self, pkg, target_use=None):
3626 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
3627 If target_use is given, the need changes are computed to make the package useable.
3628 Example: target_use = { "foo": True, "bar": False }
3629 The flags target_use must be in the pkg's IUSE.
3632 return pkg.use.enabled
3633 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
3635 if target_use is None:
3636 if needed_use_config_change is None:
3637 return pkg.use.enabled
3639 return needed_use_config_change[0]
# target_use given: start from any previously scheduled changes and
# fold the newly requested flag states on top of them.
3641 if needed_use_config_change is not None:
3642 old_use = needed_use_config_change[0]
3644 old_changes = needed_use_config_change[1]
3645 new_changes = old_changes.copy()
3647 old_use = pkg.use.enabled
3652 for flag, state in target_use.items():
3654 if flag not in old_use:
3655 if new_changes.get(flag) == False:
3657 new_changes[flag] = True
3661 if new_changes.get(flag) == True:
3663 new_changes[flag] = False
3664 new_use.update(old_use.difference(target_use))
# Helper: a restart is only worthwhile if the USE change actually
# alters dependency strings or affects a parent's USE dep.
3666 def want_restart_for_use_change(pkg, new_use):
3667 if pkg not in self._dynamic_config.digraph.nodes:
3670 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
3671 dep = pkg.metadata[key]
3672 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3673 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3675 if old_val != new_val:
3678 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3679 if not parent_atoms:
3682 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
3683 for ppkg, atom in parent_atoms:
3684 if not atom.use or \
3685 not atom.use.required.intersection(changes):
3692 if new_changes != old_changes:
3693 #Don't do the change if it violates REQUIRED_USE.
3694 required_use = pkg.metadata["REQUIRED_USE"]
3695 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3696 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# Refuse changes that touch masked or forced flags.
3699 if pkg.use.mask.intersection(new_changes) or \
3700 pkg.use.force.intersection(new_changes):
3703 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
3704 backtrack_infos = self._dynamic_config._backtrack_infos
3705 backtrack_infos.setdefault("config", {})
3706 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
3707 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
3708 if want_restart_for_use_change(pkg, new_use):
3709 self._dynamic_config._need_restart = True
3712 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
3713 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
3714 root_config = self._frozen_config.roots[root]
3715 pkgsettings = self._frozen_config.pkgsettings[root]
3716 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3717 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3718 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3719 # List of acceptable packages, ordered by type preference.
3720 matched_packages = []
3721 matched_pkgs_ignore_use = []
3722 highest_version = None
3723 if not isinstance(atom, portage.dep.Atom):
3724 atom = portage.dep.Atom(atom)
3726 have_new_virt = atom_cp.startswith("virtual/") and \
3727 self._have_new_virt(root, atom_cp)
3728 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
3729 existing_node = None
3731 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3732 usepkg = "--usepkg" in self._frozen_config.myopts
3733 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3734 empty = "empty" in self._dynamic_config.myparams
3735 selective = "selective" in self._dynamic_config.myparams
3737 avoid_update = "--update" not in self._frozen_config.myopts
3738 dont_miss_updates = "--update" in self._frozen_config.myopts
3739 use_ebuild_visibility = self._frozen_config.myopts.get(
3740 '--use-ebuild-visibility', 'n') != 'n'
3741 reinstall_atoms = self._frozen_config.reinstall_atoms
3742 usepkg_exclude = self._frozen_config.usepkg_exclude
3743 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
3745 # Behavior of the "selective" parameter depends on
3746 # whether or not a package matches an argument atom.
3747 # If an installed package provides an old-style
3748 # virtual that is no longer provided by an available
3749 # package, the installed package may match an argument
3750 # atom even though none of the available packages do.
3751 # Therefore, "selective" logic does not consider
3752 # whether or not an installed package matches an
3753 # argument atom. It only considers whether or not
3754 # available packages match argument atoms, which is
3755 # represented by the found_available_arg flag.
3756 found_available_arg = False
3757 packages_with_invalid_use_config = []
3758 for find_existing_node in True, False:
3761 for db, pkg_type, built, installed, db_keys in dbs:
3764 if installed and not find_existing_node:
3765 want_reinstall = reinstall or empty or \
3766 (found_available_arg and not selective)
3767 if want_reinstall and matched_packages:
3770 # Ignore USE deps for the initial match since we want to
3771 # ensure that updates aren't missed solely due to the user's
3772 # USE configuration.
3773 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3775 if pkg.cp != atom_cp and have_new_virt:
3776 # pull in a new-style virtual instead
3778 if pkg in self._dynamic_config._runtime_pkg_mask:
3779 # The package has been masked by the backtracking logic
3781 root_slot = (pkg.root, pkg.slot_atom)
3782 if pkg.built and root_slot in self._rebuild.rebuild_list:
3784 if (pkg.installed and
3785 root_slot in self._rebuild.reinstall_list):
3788 if not pkg.installed and \
3789 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3790 modified_use=self._pkg_use_enabled(pkg)):
3793 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
3794 modified_use=self._pkg_use_enabled(pkg)):
3797 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
3798 modified_use=self._pkg_use_enabled(pkg))
3800 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
3801 (not pkg.installed or dont_miss_updates):
3802 # Check if a higher version was rejected due to user
3803 # USE configuration. The packages_with_invalid_use_config
3804 # list only contains unbuilt ebuilds since USE can't
3805 # be changed for built packages.
3806 higher_version_rejected = False
3807 repo_priority = pkg.repo_priority
3808 for rejected in packages_with_invalid_use_config:
3809 if rejected.cp != pkg.cp:
3812 higher_version_rejected = True
3814 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
3815 # If version is identical then compare
3816 # repo priority (see bug #350254).
3817 rej_repo_priority = rejected.repo_priority
3818 if rej_repo_priority is not None and \
3819 (repo_priority is None or
3820 rej_repo_priority > repo_priority):
3821 higher_version_rejected = True
3823 if higher_version_rejected:
3827 reinstall_for_flags = None
3829 if not pkg.installed or \
3830 (matched_packages and not avoid_update):
3831 # Only enforce visibility on installed packages
3832 # if there is at least one other visible package
3833 # available. By filtering installed masked packages
3834 # here, packages that have been masked since they
3835 # were installed can be automatically downgraded
3836 # to an unmasked version. NOTE: This code needs to
3837 # be consistent with masking behavior inside
3838 # _dep_check_composite_db, in order to prevent
3839 # incorrect choices in || deps like bug #351828.
3841 if not self._pkg_visibility_check(pkg, \
3842 allow_unstable_keywords=allow_unstable_keywords,
3843 allow_license_changes=allow_license_changes,
3844 allow_unmasks=allow_unmasks):
3847 # Enable upgrade or downgrade to a version
3848 # with visible KEYWORDS when the installed
3849 # version is masked by KEYWORDS, but never
3850 # reinstall the same exact version only due
3851 # to a KEYWORDS mask. See bug #252167.
3853 if pkg.type_name != "ebuild" and matched_packages:
3854 # Don't re-install a binary package that is
3855 # identical to the currently installed package
3856 # (see bug #354441).
3857 identical_binary = False
3858 if usepkg and pkg.installed:
3859 for selected_pkg in matched_packages:
3860 if selected_pkg.type_name == "binary" and \
3861 selected_pkg.cpv == pkg.cpv and \
3862 selected_pkg.metadata.get('BUILD_TIME') == \
3863 pkg.metadata.get('BUILD_TIME'):
3864 identical_binary = True
3867 if not identical_binary:
3868 # If the ebuild no longer exists or it's
3869 # keywords have been dropped, reject built
3870 # instances (installed or binary).
3871 # If --usepkgonly is enabled, assume that
3872 # the ebuild status should be ignored.
3873 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
3874 if pkg.installed and pkg.masks:
3879 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
3880 except portage.exception.PackageNotFound:
3881 pkg_eb_visible = False
3882 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3883 "ebuild", Atom("=%s" % (pkg.cpv,))):
3884 if self._pkg_visibility_check(pkg_eb, \
3885 allow_unstable_keywords=allow_unstable_keywords,
3886 allow_license_changes=allow_license_changes,
3887 allow_unmasks=allow_unmasks):
3888 pkg_eb_visible = True
3890 if not pkg_eb_visible:
3893 if not self._pkg_visibility_check(pkg_eb, \
3894 allow_unstable_keywords=allow_unstable_keywords,
3895 allow_license_changes=allow_license_changes,
3896 allow_unmasks=allow_unmasks):
3899 # Calculation of USE for unbuilt ebuilds is relatively
3900 # expensive, so it is only performed lazily, after the
3901 # above visibility checks are complete.
3904 if root == self._frozen_config.target_root:
3906 myarg = next(self._iter_atoms_for_pkg(pkg))
3907 except StopIteration:
3909 except portage.exception.InvalidDependString:
3911 # masked by corruption
3913 if not installed and myarg:
3914 found_available_arg = True
3916 if atom.unevaluated_atom.use:
3917 #Make sure we don't miss a 'missing IUSE'.
3918 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3919 # Don't add this to packages_with_invalid_use_config
3920 # since IUSE cannot be adjusted by the user.
3925 matched_pkgs_ignore_use.append(pkg)
3926 if allow_use_changes and not pkg.built:
3928 for flag in atom.use.enabled:
3929 target_use[flag] = True
3930 for flag in atom.use.disabled:
3931 target_use[flag] = False
3932 use = self._pkg_use_enabled(pkg, target_use)
3934 use = self._pkg_use_enabled(pkg)
3937 can_adjust_use = not pkg.built
3938 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3939 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3941 if atom.use.enabled:
3942 if atom.use.enabled.intersection(missing_disabled):
3944 can_adjust_use = False
3945 need_enabled = atom.use.enabled.difference(use)
3947 need_enabled = need_enabled.difference(missing_enabled)
3951 if pkg.use.mask.intersection(need_enabled):
3952 can_adjust_use = False
3954 if atom.use.disabled:
3955 if atom.use.disabled.intersection(missing_enabled):
3957 can_adjust_use = False
3958 need_disabled = atom.use.disabled.intersection(use)
3960 need_disabled = need_disabled.difference(missing_disabled)
3964 if pkg.use.force.difference(
3965 pkg.use.mask).intersection(need_disabled):
3966 can_adjust_use = False
3970 # Above we must ensure that this package has
3971 # absolutely no use.force, use.mask, or IUSE
3972 # issues that the user typically can't make
3973 # adjustments to solve (see bug #345979).
3974 # FIXME: Conditional USE deps complicate
3975 # issues. This code currently excludes cases
3976 # in which the user can adjust the parent
3977 # package's USE in order to satisfy the dep.
3978 packages_with_invalid_use_config.append(pkg)
3981 if pkg.cp == atom_cp:
3982 if highest_version is None:
3983 highest_version = pkg
3984 elif pkg > highest_version:
3985 highest_version = pkg
3986 # At this point, we've found the highest visible
3987 # match from the current repo. Any lower versions
3988 # from this repo are ignored, so this so the loop
3989 # will always end with a break statement below
3991 if find_existing_node:
3992 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3996 # Use PackageSet.findAtomForPackage()
3997 # for PROVIDE support.
3998 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3999 if highest_version and \
4000 e_pkg.cp == atom_cp and \
4001 e_pkg < highest_version and \
4002 e_pkg.slot_atom != highest_version.slot_atom:
4003 # There is a higher version available in a
4004 # different slot, so this existing node is
4008 matched_packages.append(e_pkg)
4009 existing_node = e_pkg
4011 # Compare built package to current config and
4012 # reject the built package if necessary.
4013 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4014 ("--newuse" in self._frozen_config.myopts or \
4015 "--reinstall" in self._frozen_config.myopts or \
4016 (not installed and self._dynamic_config.myparams.get(
4017 "binpkg_respect_use") in ("y", "auto"))):
4018 iuses = pkg.iuse.all
4019 old_use = self._pkg_use_enabled(pkg)
4021 pkgsettings.setcpv(myeb)
4023 pkgsettings.setcpv(pkg)
4024 now_use = pkgsettings["PORTAGE_USE"].split()
4025 forced_flags = set()
4026 forced_flags.update(pkgsettings.useforce)
4027 forced_flags.update(pkgsettings.usemask)
4029 if myeb and not usepkgonly and not useoldpkg:
4030 cur_iuse = myeb.iuse.all
4031 reinstall_for_flags = self._reinstall_for_flags(forced_flags,
4032 old_use, iuses, now_use, cur_iuse)
4033 if reinstall_for_flags:
4034 if not pkg.installed:
4035 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4037 # Compare current config to installed package
4038 # and do not reinstall if possible.
4039 if not installed and not useoldpkg and \
4040 ("--newuse" in self._frozen_config.myopts or \
4041 "--reinstall" in self._frozen_config.myopts) and \
4042 cpv in vardb.match(atom):
4043 forced_flags = set()
4044 forced_flags.update(pkg.use.force)
4045 forced_flags.update(pkg.use.mask)
4046 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4047 old_use = inst_pkg.use.enabled
4048 old_iuse = inst_pkg.iuse.all
4049 cur_use = self._pkg_use_enabled(pkg)
4050 cur_iuse = pkg.iuse.all
4051 reinstall_for_flags = \
4052 self._reinstall_for_flags(
4053 forced_flags, old_use, old_iuse,
4055 if reinstall_for_flags:
4057 if reinstall_atoms.findAtomForPackage(pkg, \
4058 modified_use=self._pkg_use_enabled(pkg)):
4063 matched_oldpkg.append(pkg)
4064 matched_packages.append(pkg)
4065 if reinstall_for_flags:
4066 self._dynamic_config._reinstall_nodes[pkg] = \
4070 if not matched_packages:
4073 if "--debug" in self._frozen_config.myopts:
4074 for pkg in matched_packages:
4075 portage.writemsg("%s %s%s%s\n" % \
4076 ((pkg.type_name + ":").rjust(10),
4077 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4079 # Filter out any old-style virtual matches if they are
4080 # mixed with new-style virtual matches.
4082 if len(matched_packages) > 1 and \
4083 "virtual" == portage.catsplit(cp)[0]:
4084 for pkg in matched_packages:
4087 # Got a new-style virtual, so filter
4088 # out any old-style virtuals.
4089 matched_packages = [pkg for pkg in matched_packages \
4093 if existing_node is not None and \
4094 existing_node in matched_packages:
4095 return existing_node, existing_node
4097 if len(matched_packages) > 1:
4098 if rebuilt_binaries:
4102 for pkg in matched_packages:
4108 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4110 if built_pkg is not None and inst_pkg is not None:
4111 # Only reinstall if binary package BUILD_TIME is
4112 # non-empty, in order to avoid cases like to
4113 # bug #306659 where BUILD_TIME fields are missing
4114 # in local and/or remote Packages file.
4116 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
4117 except (KeyError, ValueError):
4121 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
4122 except (KeyError, ValueError):
4123 installed_timestamp = 0
4125 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4127 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4128 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4129 if built_timestamp and \
4130 built_timestamp > installed_timestamp and \
4131 built_timestamp >= minimal_timestamp:
4132 return built_pkg, existing_node
4134 #Don't care if the binary has an older BUILD_TIME than the installed
4135 #package. This is for closely tracking a binhost.
4136 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4138 if built_timestamp and \
4139 built_timestamp != installed_timestamp:
4140 return built_pkg, existing_node
4142 for pkg in matched_packages:
4143 if pkg.installed and pkg.invalid:
4144 matched_packages = [x for x in \
4145 matched_packages if x is not pkg]
4148 for pkg in matched_packages:
4149 if pkg.installed and self._pkg_visibility_check(pkg, \
4150 allow_unstable_keywords=allow_unstable_keywords,
4151 allow_license_changes=allow_license_changes,
4152 allow_unmasks=allow_unmasks):
4153 return pkg, existing_node
4155 visible_matches = []
4157 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4158 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
4159 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
4160 if not visible_matches:
4161 visible_matches = [pkg.cpv for pkg in matched_packages \
4162 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
4163 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
4165 bestmatch = portage.best(visible_matches)
4167 # all are masked, so ignore visibility
4168 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4169 matched_packages = [pkg for pkg in matched_packages \
4170 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4172 # ordered by type preference ("ebuild" type is the last resort)
4173 return matched_packages[-1], existing_node
4175 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4177 Select packages that have already been added to the graph or
4178 those that are installed and have not been scheduled for
4181 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4182 matches = graph_db.match_pkgs(atom)
4185 pkg = matches[-1] # highest match
4186 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4187 return pkg, in_graph
4189 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4191 Select packages that are installed.
4193 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
4197 if len(matches) > 1:
4198 matches.reverse() # ascending order
4199 unmasked = [pkg for pkg in matches if \
4200 self._pkg_visibility_check(pkg)]
4202 if len(unmasked) == 1:
4205 # Account for packages with masks (like KEYWORDS masks)
4206 # that are usually ignored in visibility checks for
4207 # installed packages, in order to handle cases like
4209 unmasked = [pkg for pkg in matches if not pkg.masks]
4212 pkg = matches[-1] # highest match
4213 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4214 return pkg, in_graph
# NOTE(review): this chunk is a line-numbered listing with interior lines
# dropped (e.g. 4217, 4224, 4231 are missing), so the code below is not
# directly runnable; it is kept byte-identical and only comments are added.
4216 def _complete_graph(self, required_sets=None):
4218 Add any deep dependencies of required sets (args, system, world) that
4219 have not been pulled into the graph yet. This ensures that the graph
4220 is consistent such that initially satisfied deep dependencies are not
4221 broken in the new graph. Initially unsatisfied dependencies are
4222 irrelevant since we only want to avoid breaking dependencies that are
4223 initially satisfied.
4225 Since this method can consume enough time to disturb users, it is
4226 currently only enabled by the --complete-graph option.
4228 @param required_sets: contains required sets (currently only used
4229 for depclean and prune removal operations)
4230 @type required_sets: dict
4232 if "--buildpkgonly" in self._frozen_config.myopts or \
4233 "recurse" not in self._dynamic_config.myparams:
# Auto-enable "complete" mode when an installed package's version would
# change, controlled by the "complete_if_new_ver" parameter (default "y").
4236 if "complete" not in self._dynamic_config.myparams and \
4237 self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
4238 # Enable complete mode if an installed package version will change.
4239 version_change = False
4240 for node in self._dynamic_config.digraph:
4241 if not isinstance(node, Package) or \
4242 node.operation != "merge":
4244 vardb = self._frozen_config.roots[
4245 node.root].trees["vartree"].dbapi
4246 inst_pkg = vardb.match_pkgs(node.slot_atom)
4247 if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
4248 version_change = True
4252 self._dynamic_config.myparams["complete"] = True
4254 if "complete" not in self._dynamic_config.myparams:
# Switch package selection to graph/installed-only implementations and
# force deep traversal, so that already-satisfied deps are re-examined.
4259 # Put the depgraph into a mode that causes it to only
4260 # select packages that have already been added to the
4261 # graph or those that are installed and have not been
4262 # scheduled for replacement. Also, toggle the "deep"
4263 # parameter so that all dependencies are traversed and
4265 self._select_atoms = self._select_atoms_from_graph
4266 if "remove" in self._dynamic_config.myparams:
4267 self._select_package = self._select_pkg_from_installed
4269 self._select_package = self._select_pkg_from_graph
4270 self._dynamic_config._traverse_ignored_deps = True
4271 already_deep = self._dynamic_config.myparams.get("deep") is True
4272 if not already_deep:
4273 self._dynamic_config.myparams["deep"] = True
4275 # Invalidate the package selection cache, since
4276 # _select_package has just changed implementations.
4277 for trees in self._dynamic_config._filtered_trees.values():
4278 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets (system/world/args) as SetArg instances so
# their atoms are pushed onto the dep stack and traversed.
4280 args = self._dynamic_config._initial_arg_list[:]
4281 for root in self._frozen_config.roots:
4282 if root != self._frozen_config.target_root and \
4283 "remove" in self._dynamic_config.myparams:
4284 # Only pull in deps for the relevant root.
4286 depgraph_sets = self._dynamic_config.sets[root]
4287 required_set_names = self._frozen_config._required_set_names.copy()
4288 remaining_args = required_set_names.copy()
4289 if required_sets is None or root not in required_sets:
4292 # Removal actions may override sets with temporary
4293 # replacements that have had atoms removed in order
4294 # to implement --deselect behavior.
4295 required_set_names = set(required_sets[root])
4296 depgraph_sets.sets.clear()
4297 depgraph_sets.sets.update(required_sets[root])
4298 if "remove" not in self._dynamic_config.myparams and \
4299 root == self._frozen_config.target_root and \
4301 remaining_args.difference_update(depgraph_sets.sets)
4302 if not remaining_args and \
4303 not self._dynamic_config._ignored_deps and \
4304 not self._dynamic_config._dep_stack:
4306 root_config = self._frozen_config.roots[root]
4307 for s in required_set_names:
4308 pset = depgraph_sets.sets.get(s)
4310 pset = root_config.sets[s]
4311 atom = SETPREFIX + s
4312 args.append(SetArg(arg=atom, pset=pset,
4313 root_config=root_config))
4315 self._set_args(args)
4316 for arg in self._expand_set_args(args, add_to_digraph=True):
4317 for atom in arg.pset.getAtoms():
4318 self._dynamic_config._dep_stack.append(
4319 Dependency(atom=atom, root=arg.root_config.root,
4323 if self._dynamic_config._ignored_deps:
4324 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4325 self._dynamic_config._ignored_deps = []
4326 if not self._create_graph(allow_unsatisfied=True):
4328 # Check the unsatisfied deps to see if any initially satisfied deps
4329 # will become unsatisfied due to an upgrade. Initially unsatisfied
4330 # deps are irrelevant since we only want to avoid breaking deps
4331 # that are initially satisfied.
4332 while self._dynamic_config._unsatisfied_deps:
4333 dep = self._dynamic_config._unsatisfied_deps.pop()
4334 vardb = self._frozen_config.roots[
4335 dep.root].trees["vartree"].dbapi
4336 matches = vardb.match_pkgs(dep.atom)
4337 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4340 # An scheduled installation broke a deep dependency.
4341 # Add the installed package to the graph so that it
4342 # will be appropriately reported as a slot collision
4343 # (possibly solvable via backtracking).
4344 pkg = matches[-1] # highest match
4345 if not self._add_pkg(pkg, dep):
4347 if not self._create_graph(allow_unsatisfied=True):
def _pkg(self, cpv, type_name, root_config, installed=False,
	onlydeps=False, myrepo=None):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises PackageNotFound from aux_get if it
	fails for some reason (package does not exist or is
	corrupt).
	"""

	# Ensure that we use the specially optimized RootConfig instance
	# that refers to FakeVartree instead of the real vartree.
	root_config = self._frozen_config.roots[root_config.root]
	pkg = self._frozen_config._pkg_cache.get(
		Package._gen_hash_key(cpv=cpv, type_name=type_name,
		repo_name=myrepo, root_config=root_config,
		installed=installed, onlydeps=onlydeps))
	if pkg is None and onlydeps and not installed:
		# Maybe it already got pulled in as a "merge" node.
		pkg = self._dynamic_config.mydbapi[root_config.root].get(
			Package._gen_hash_key(cpv=cpv, type_name=type_name,
			repo_name=myrepo, root_config=root_config,
			installed=installed, onlydeps=False))

	if pkg is None:
		tree_type = self.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		db_keys = list(self._frozen_config._trees_orig[root_config.root][
			tree_type].dbapi._aux_cache_keys)

		try:
			metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
		except KeyError:
			raise portage.exception.PackageNotFound(cpv)

		pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
			installed=installed, metadata=metadata, onlydeps=onlydeps,
			root_config=root_config, type_name=type_name)

		self._frozen_config._pkg_cache[pkg] = pkg

		# Track the highest version that is masked solely by LICENSE,
		# so unsatisfied-dep output can suggest an ACCEPT_LICENSE change.
		if not self._pkg_visibility_check(pkg) and \
			'LICENSE' in pkg.masks and len(pkg.masks) == 1:
			slot_key = (pkg.root, pkg.slot_atom)
			other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
			if other_pkg is None or pkg > other_pkg:
				self._frozen_config._highest_license_masked[slot_key] = pkg

	return pkg
# NOTE(review): line-numbered listing with interior lines dropped (e.g.
# 4406, 4409-4410, 4436-4437 missing); code kept byte-identical below,
# only review comments added.
4400 def _validate_blockers(self):
4401 """Remove any blockers from the digraph that do not match any of the
4402 packages within the graph. If necessary, create hard deps to ensure
4403 correct merge order such that mutually blocking packages are never
4404 installed simultaneously. Also add runtime blockers from all installed
4405 packages if any of them haven't been added already (bug 128809)."""
4407 if "--buildpkgonly" in self._frozen_config.myopts or \
4408 "--nodeps" in self._frozen_config.myopts:
4411 complete = "complete" in self._dynamic_config.myparams
4412 deep = "deep" in self._dynamic_config.myparams
# First pass: collect runtime blocker atoms from every installed package,
# using the on-disk BlockerCache to avoid repeated dep_check calls.
4415 # Pull in blockers from all installed packages that haven't already
4416 # been pulled into the depgraph, in order to ensure that they are
4417 # respected (bug 128809). Due to the performance penalty that is
4418 # incurred by all the additional dep_check calls that are required,
4419 # blockers returned from dep_check are cached on disk by the
4420 # BlockerCache class.
4422 # For installed packages, always ignore blockers from DEPEND since
4423 # only runtime dependencies should be relevant for packages that
4424 # are already built.
4425 dep_keys = ["RDEPEND", "PDEPEND"]
4426 for myroot in self._frozen_config.trees:
4427 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4428 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4429 pkgsettings = self._frozen_config.pkgsettings[myroot]
4430 root_config = self._frozen_config.roots[myroot]
4431 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
4432 final_db = self._dynamic_config.mydbapi[myroot]
4434 blocker_cache = BlockerCache(myroot, vardb)
4435 stale_cache = set(blocker_cache)
4438 stale_cache.discard(cpv)
4439 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4441 pkg in self._dynamic_config._traversed_pkg_deps
4443 # Check for masked installed packages. Only warn about
4444 # packages that are in the graph in order to avoid warning
4445 # about those that will be automatically uninstalled during
4446 # the merge process or by --depclean. Always warn about
4447 # packages masked by license, since the user likely wants
4448 # to adjust ACCEPT_LICENSE.
4450 if not self._pkg_visibility_check(pkg,
4451 trust_graph=False) and \
4452 (pkg_in_graph or 'LICENSE' in pkg.masks):
4453 self._dynamic_config._masked_installed.add(pkg)
4455 self._check_masks(pkg)
4457 blocker_atoms = None
4463 self._dynamic_config._blocker_parents.child_nodes(pkg))
4468 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4472 # Select just the runtime blockers.
4473 blockers = [blocker for blocker in blockers \
4474 if blocker.priority.runtime or \
4475 blocker.priority.runtime_post]
4476 if blockers is not None:
4477 blockers = set(blocker.atom for blocker in blockers)
4479 # If this node has any blockers, create a "nomerge"
4480 # node for it so that they can be enforced.
4481 self._spinner_update()
# Validate the cached blocker data against the package COUNTER and
# against blockers already present in the graph, then update the cache.
4482 blocker_data = blocker_cache.get(cpv)
4483 if blocker_data is not None and \
4484 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4487 # If blocker data from the graph is available, use
4488 # it to validate the cache and update the cache if
4490 if blocker_data is not None and \
4491 blockers is not None:
4492 if not blockers.symmetric_difference(
4493 blocker_data.atoms):
4497 if blocker_data is None and \
4498 blockers is not None:
4499 # Re-use the blockers from the graph.
4500 blocker_atoms = sorted(blockers)
4501 counter = long(pkg.metadata["COUNTER"])
4503 blocker_cache.BlockerData(counter, blocker_atoms)
4504 blocker_cache[pkg.cpv] = blocker_data
4508 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
# Cache miss: run dep_check on the package's runtime deps to extract
# blocker atoms, with error reporting for invalid dep strings.
4510 # Use aux_get() to trigger FakeVartree global
4511 # updates on *DEPEND when appropriate.
4512 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4513 # It is crucial to pass in final_db here in order to
4514 # optimize dep_check calls by eliminating atoms via
4515 # dep_wordreduce and dep_eval calls.
4517 success, atoms = portage.dep_check(depstr,
4518 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4519 trees=self._dynamic_config._graph_trees, myroot=myroot)
4522 except Exception as e:
4523 # This is helpful, for example, if a ValueError
4524 # is thrown from cpv_expand due to multiple
4525 # matches (this can happen if an atom lacks a
4527 show_invalid_depstring_notice(
4528 pkg, depstr, _unicode_decode("%s") % (e,))
4532 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4533 if replacement_pkg and \
4534 replacement_pkg[0].operation == "merge":
4535 # This package is being replaced anyway, so
4536 # ignore invalid dependencies so as not to
4537 # annoy the user too much (otherwise they'd be
4538 # forced to manually unmerge it first).
4540 show_invalid_depstring_notice(pkg, depstr, atoms)
4542 blocker_atoms = [myatom for myatom in atoms \
4544 blocker_atoms.sort()
4545 counter = long(pkg.metadata["COUNTER"])
4546 blocker_cache[cpv] = \
4547 blocker_cache.BlockerData(counter, blocker_atoms)
# Register a Blocker node for each runtime blocker atom of this package.
4550 for atom in blocker_atoms:
4551 blocker = Blocker(atom=atom,
4552 eapi=pkg.metadata["EAPI"],
4553 priority=self._priority(runtime=True),
4555 self._dynamic_config._blocker_parents.add(blocker, pkg)
4556 except portage.exception.InvalidAtom as e:
4557 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4558 show_invalid_depstring_notice(
4560 _unicode_decode("Invalid Atom: %s") % (e,))
4562 for cpv in stale_cache:
4563 del blocker_cache[cpv]
4564 blocker_cache.flush()
4567 # Discard any "uninstall" tasks scheduled by previous calls
4568 # to this method, since those tasks may not make sense given
4569 # the current graph state.
4570 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
4571 if previous_uninstall_tasks:
4572 self._dynamic_config._blocker_uninstalls = digraph()
4573 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second pass: resolve each blocker against both the initial (installed)
# and final (post-merge) package sets, expanding old-style virtuals.
4575 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
4576 self._spinner_update()
4577 root_config = self._frozen_config.roots[blocker.root]
4578 virtuals = root_config.settings.getvirtuals()
4579 myroot = blocker.root
4580 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
4581 final_db = self._dynamic_config.mydbapi[myroot]
4583 provider_virtual = False
4584 if blocker.cp in virtuals and \
4585 not self._have_new_virt(blocker.root, blocker.cp):
4586 provider_virtual = True
4588 # Use this to check PROVIDE for each matched package
4590 atom_set = InternalPackageSet(
4591 initial_atoms=[blocker.atom])
4593 if provider_virtual:
4595 for provider_entry in virtuals[blocker.cp]:
4596 atoms.append(Atom(blocker.atom.replace(
4597 blocker.cp, provider_entry.cp, 1)))
4599 atoms = [blocker.atom]
4601 blocked_initial = set()
4603 for pkg in initial_db.match_pkgs(atom):
4604 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4605 blocked_initial.add(pkg)
4607 blocked_final = set()
4609 for pkg in final_db.match_pkgs(atom):
4610 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4611 blocked_final.add(pkg)
4613 if not blocked_initial and not blocked_final:
4614 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
4615 self._dynamic_config._blocker_parents.remove(blocker)
4616 # Discard any parents that don't have any more blockers.
4617 for pkg in parent_pkgs:
4618 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
4619 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
4620 self._dynamic_config._blocker_parents.remove(pkg)
# Classify each blocked package as resolvable-by-ordering (unmerge before
# merge) or unresolvable for every parent of this blocker.
4622 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4623 unresolved_blocks = False
4624 depends_on_order = set()
4625 for pkg in blocked_initial:
4626 if pkg.slot_atom == parent.slot_atom and \
4627 not blocker.atom.blocker.overlap.forbid:
4628 # New !!atom blockers do not allow temporary
4629 # simulaneous installation, so unlike !atom
4630 # blockers, !!atom blockers aren't ignored
4631 # when they match other packages occupying
4634 if parent.installed:
4635 # Two currently installed packages conflict with
4636 # eachother. Ignore this case since the damage
4637 # is already done and this would be likely to
4638 # confuse users if displayed like a normal blocker.
4641 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4643 if parent.operation == "merge":
4644 # Maybe the blocked package can be replaced or simply
4645 # unmerged to resolve this block.
4646 depends_on_order.add((pkg, parent))
4648 # None of the above blocker resolutions techniques apply,
4649 # so apparently this one is unresolvable.
4650 unresolved_blocks = True
4651 for pkg in blocked_final:
4652 if pkg.slot_atom == parent.slot_atom and \
4653 not blocker.atom.blocker.overlap.forbid:
4654 # New !!atom blockers do not allow temporary
4655 # simulaneous installation, so unlike !atom
4656 # blockers, !!atom blockers aren't ignored
4657 # when they match other packages occupying
4660 if parent.operation == "nomerge" and \
4661 pkg.operation == "nomerge":
4662 # This blocker will be handled the next time that a
4663 # merge of either package is triggered.
4666 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4668 # Maybe the blocking package can be
4669 # unmerged to resolve this block.
4670 if parent.operation == "merge" and pkg.installed:
4671 depends_on_order.add((pkg, parent))
4673 elif parent.operation == "nomerge":
4674 depends_on_order.add((parent, pkg))
4676 # None of the above blocker resolutions techniques apply,
4677 # so apparently this one is unresolvable.
4678 unresolved_blocks = True
4680 # Make sure we don't unmerge any package that have been pulled
4682 if not unresolved_blocks and depends_on_order:
4683 for inst_pkg, inst_task in depends_on_order:
4684 if self._dynamic_config.digraph.contains(inst_pkg) and \
4685 self._dynamic_config.digraph.parent_nodes(inst_pkg):
4686 unresolved_blocks = True
# Schedule uninstall tasks with hard ordering deps so blocked packages
# are removed before the conflicting merge takes place.
4689 if not unresolved_blocks and depends_on_order:
4690 for inst_pkg, inst_task in depends_on_order:
4691 uninst_task = Package(built=inst_pkg.built,
4692 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4693 metadata=inst_pkg.metadata,
4694 operation="uninstall",
4695 root_config=inst_pkg.root_config,
4696 type_name=inst_pkg.type_name)
4697 # Enforce correct merge order with a hard dep.
4698 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
4699 priority=BlockerDepPriority.instance)
4700 # Count references to this blocker so that it can be
4701 # invalidated after nodes referencing it have been
4703 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
4704 if not unresolved_blocks and not depends_on_order:
4705 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
4706 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
4707 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
4708 self._dynamic_config._blocker_parents.remove(blocker)
4709 if not self._dynamic_config._blocker_parents.child_nodes(parent):
4710 self._dynamic_config._blocker_parents.remove(parent)
4711 if unresolved_blocks:
4712 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
4716 def _accept_blocker_conflicts(self):
4718 for x in ("--buildpkgonly", "--fetchonly",
4719 "--fetch-all-uri", "--nodeps"):
4720 if x in self._frozen_config.myopts:
def _merge_order_bias(self, mygraph):
	"""
	For optimal leaf node selection, promote deep system runtime deps and
	order nodes from highest to lowest overall reference count.
	"""

	node_info = {}
	for node in mygraph.order:
		node_info[node] = len(mygraph.parent_nodes(node))
	deep_system_deps = _find_deep_system_runtime_deps(mygraph)

	def cmp_merge_preference(node1, node2):

		# Uninstall tasks sort after all install tasks.
		if node1.operation == 'uninstall':
			if node2.operation == 'uninstall':
				return 0
			return 1

		if node2.operation == 'uninstall':
			if node1.operation == 'uninstall':
				return 0
			return -1

		# Deep system runtime deps come before everything else.
		node1_sys = node1 in deep_system_deps
		node2_sys = node2 in deep_system_deps
		if node1_sys != node2_sys:
			if node1_sys:
				return -1
			return 1

		# Otherwise, higher overall reference count sorts first.
		return node_info[node2] - node_info[node1]

	mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
	"""
	Serialize the dependency graph into an ordered task list,
	resolving conflicts first when necessary. The serialized list
	is cached, so repeated calls are cheap.

	@param reversed: when True, return the list in reverse order
		(note: parameter name shadows the builtin, kept for
		interface compatibility)
	@return: a copy of the serialized task list
	"""
	while self._dynamic_config._serialized_tasks_cache is None:
		self._resolve_conflicts()
		try:
			self._dynamic_config._serialized_tasks_cache, \
				self._dynamic_config._scheduler_graph = \
				self._serialize_tasks()
		except self._serialize_tasks_retry:
			pass

	retlist = self._dynamic_config._serialized_tasks_cache[:]
	if reversed:
		retlist.reverse()
	return retlist
def _implicit_libc_deps(self, mergelist, graph):
	"""
	Create implicit dependencies on libc, in order to ensure that libc
	is installed as early as possible (see bug #303567).
	"""
	libc_pkgs = {}
	implicit_libc_roots = (self._frozen_config._running_root.root,)
	for root in implicit_libc_roots:
		graphdb = self._dynamic_config.mydbapi[root]
		vardb = self._frozen_config.trees[root]["vartree"].dbapi
		for atom in self._expand_virt_from_graph(root,
			portage.const.LIBC_PACKAGE_ATOM):
			if atom.blocker:
				continue
			match = graphdb.match_pkgs(atom)
			if not match:
				continue
			pkg = match[-1]
			# Only a libc that is newly merged (not already installed)
			# needs to be promoted in the merge order.
			if pkg.operation == "merge" and \
				not vardb.cpv_exists(pkg.cpv):
				libc_pkgs.setdefault(pkg.root, set()).add(pkg)

	if not libc_pkgs:
		return

	earlier_libc_pkgs = set()

	for pkg in mergelist:
		if not isinstance(pkg, Package):
			# a satisfied blocker
			continue
		root_libc_pkgs = libc_pkgs.get(pkg.root)
		if root_libc_pkgs is not None and \
			pkg.operation == "merge":
			if pkg in root_libc_pkgs:
				earlier_libc_pkgs.add(pkg)
			else:
				# Add a buildtime dep on any libc that appears earlier
				# in the mergelist, so libc stays early in the order.
				for libc_pkg in root_libc_pkgs:
					if libc_pkg in earlier_libc_pkgs:
						graph.add(libc_pkg, pkg,
							priority=DepPriority(buildtime=True))
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in it's parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.
	"""

	# NOTE: altlist initializes self._dynamic_config._scheduler_graph
	mergelist = self.altlist()
	self._implicit_libc_deps(mergelist,
		self._dynamic_config._scheduler_graph)

	# Break DepPriority.satisfied attributes which reference
	# installed Package instances.
	for parents, children, node in \
		self._dynamic_config._scheduler_graph.nodes.values():
		for priorities in chain(parents.values(), children.values()):
			for priority in priorities:
				if priority.satisfied:
					priority.satisfied = True

	# Prune the package cache down to packages that the scheduler
	# actually needs (in the graph, or installed), then break all
	# remaining references back to this depgraph.
	pkg_cache = self._frozen_config._pkg_cache
	graph = self._dynamic_config._scheduler_graph
	trees = self._frozen_config.trees
	pruned_pkg_cache = {}
	for key, pkg in pkg_cache.items():
		if pkg in graph or \
			(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
			pruned_pkg_cache[key] = pkg

	for root in trees:
		trees[root]['vartree']._pkg_cache = pruned_pkg_cache

	self.break_refs()
	sched_config = \
		_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)

	return sched_config
	def break_refs(self):
		"""
		Break any references in Package instances that lead back to the depgraph.
		This is useful if you want to hold references to packages without also
		holding the depgraph on the heap. It should only be called after the
		depgraph and _frozen_config will not be used for any more calculations.
		"""
		for root_config in self._frozen_config.roots.values():
			# Sync this root_config with the original (pre-depgraph) one.
			root_config.update(self._frozen_config._trees_orig[
				root_config.root]["root_config"])
			# Both instances are now identical, so discard the
			# original which should have no other references.
			self._frozen_config._trees_orig[
				root_config.root]["root_config"] = root_config
	def _resolve_conflicts(self):
		"""
		Complete the dependency graph, validate blockers, and process any
		remaining slot conflicts.

		Raises the exception produced by self._unknown_internal_error() if
		graph completion or blocker validation fails.
		"""
		if not self._complete_graph():
			raise self._unknown_internal_error()

		if not self._validate_blockers():
			# A restarted calculation would not help here.
			self._dynamic_config._skip_restart = True
			raise self._unknown_internal_error()

		if self._dynamic_config._slot_collision_info:
			self._process_slot_conflicts()
	def _serialize_tasks(self):
		"""
		Flatten the dependency digraph into an ordered task list.

		Repeatedly selects "leaf" packages from a working copy of the
		digraph, progressively loosening the dependency-priority criteria in
		order to cope with circular dependencies, and scheduling Uninstall
		tasks where needed to resolve blocker conflicts.

		Returns a (retlist, scheduler_graph) tuple: retlist is the ordered
		list of tasks (plus Blocker instances considered interesting for
		display) and scheduler_graph is a digraph copy with uninstall edges
		adjusted for the Scheduler.

		Raises the exception from self._unknown_internal_error() for
		unresolved circular dependencies or unacceptable blocker conflicts,
		and self._serialize_tasks_retry() after enabling "complete" depgraph
		mode due to uninstall task(s).
		"""
		debug = "--debug" in self._frozen_config.myopts
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self._dynamic_config.digraph.debug_print()
			writemsg("\n", noiselevel=-1)

		scheduler_graph = self._dynamic_config.digraph.copy()

		if '--nodeps' in self._frozen_config.myopts:
			# Preserve the package order given on the command line.
			return ([node for node in scheduler_graph \
				if isinstance(node, Package) \
				and node.operation == 'merge'], scheduler_graph)

		mygraph=self._dynamic_config.digraph.copy()

		removed_nodes = set()

		# Prune off all DependencyArg instances since they aren't
		# needed, and because of nested sets this is faster than doing
		# it with multiple digraph.root_nodes() calls below. This also
		# takes care of nested sets that have circular references,
		# which wouldn't be matched by digraph.root_nodes().
		for node in mygraph:
			if isinstance(node, DependencyArg):
				removed_nodes.add(node)
			mygraph.difference_update(removed_nodes)
		removed_nodes.clear()

		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
			for node in mygraph.root_nodes():
				if not isinstance(node, Package) or \
					node.installed or node.onlydeps:
					removed_nodes.add(node)
				self._spinner_update()
				mygraph.difference_update(removed_nodes)
			if not removed_nodes:
			removed_nodes.clear()
		self._merge_order_bias(mygraph)
		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:
		myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()

		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		complete = "complete" in self._dynamic_config.myparams

		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]

		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._frozen_config._running_root.root
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)
			running_portage = running_portage[0]
			running_portage = None

		if replacement_portage:
			replacement_portage = replacement_portage[0]
			replacement_portage = None

		if replacement_portage == running_portage:
			replacement_portage = None

		if replacement_portage is not None and \
			(running_portage is None or \
			running_portage.cpv != replacement_portage.cpv or \
			'9999' in replacement_portage.cpv or \
			'git' in replacement_portage.inherited or \
			'git-2' in replacement_portage.inherited):
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)

		if running_portage is not None:
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=self._pkg_use_enabled(running_portage),
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString as e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				portage_rdepend = {running_portage : []}
			for atoms in portage_rdepend.values():
				runtime_deps.update(atom for atom in atoms \
					if not atom.blocker)

		# Merge libc asap, in order to account for implicit
		# dependencies. See bug #303567.
		implicit_libc_roots = (running_root,)
		for root in implicit_libc_roots:
			vardb = self._frozen_config.trees[root]["vartree"].dbapi
			graphdb = self._dynamic_config.mydbapi[root]
			for atom in self._expand_virt_from_graph(root,
				portage.const.LIBC_PACKAGE_ATOM):
				match = graphdb.match_pkgs(atom)
				if pkg.operation == "merge" and \
					not vardb.cpv_exists(pkg.cpv):

				# If there's also an os-headers upgrade, we need to
				# pull that in first. See bug #328317.
				for atom in self._expand_virt_from_graph(root,
					portage.const.OS_HEADERS_PACKAGE_ATOM):
					match = graphdb.match_pkgs(atom)
					if pkg.operation == "merge" and \
						not vardb.cpv_exists(pkg.cpv):
						asap_nodes.append(pkg)

				asap_nodes.extend(libc_pkgs)

		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
			if node not in mergeable_nodes:
			if node == replacement_portage and \
				mygraph.child_nodes(node,
					ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):

		def ignore_uninst_or_med(priority):
			# Treat blocker-uninstall edges as ignorable, in addition to
			# the priority_range's normal "medium" policy.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium(priority)

		def ignore_uninst_or_med_soft(priority):
			# Same as above, but with the "medium_soft" policy.
			if priority is BlockerDepPriority.instance:
			return priority_range.ignore_medium_soft(priority)

		tree_mode = "--tree" in self._frozen_config.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available. This is set to False when the previous iteration
		# failed to select any nodes. It is reset whenever nodes are
		# successfully selected.

		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False

		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		#
		# iteration         prefer_asap  drop_satisfied
		#
		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.

			self._spinner_update()
			selected_nodes = None
			ignore_priority = None
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for i in range(priority_range.SOFT,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					for node in asap_nodes:
						if not mygraph.child_nodes(node,
							ignore_priority=ignore_priority):
							selected_nodes = [node]
							asap_nodes.remove(node)

			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				for i in range(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)
						# If there is a mixture of merges and uninstalls,
						# do the uninstalls first.
						good_uninstalls = None
							good_uninstalls = []
								if node.operation == "uninstall":
									good_uninstalls.append(node)
								nodes = good_uninstalls

						if good_uninstalls or len(nodes) == 1 or \
							(ignore_priority is None and \
							not asap_nodes and not tree_mode):
							# Greedily pop all of these nodes since no
							# relationship has been ignored. This optimization
							# destroys --tree output, so it's disabled in tree
							selected_nodes = nodes
							# For optimal merge order:
							#  * Only pop one node.
							#  * Removing a root node (node without a parent)
							#    will not produce a leaf node, so avoid it.
							#  * It's normal for a selected uninstall to be a
							#    root node, so don't check them for parents.
								prefer_asap_parents = (True, False)
								prefer_asap_parents = (False,)
							for check_asap_parent in prefer_asap_parents:
								if check_asap_parent:
										parents = mygraph.parent_nodes(node,
											ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
										if parents and set(parents).intersection(asap_nodes):
											selected_nodes = [node]
										if mygraph.parent_nodes(node):
											selected_nodes = [node]

			if not selected_nodes:
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
					# When gathering the nodes belonging to a runtime cycle,
					# we want to minimize the number of nodes gathered, since
					# this tends to produce a more optimal merge order.
					# Ignoring all medium_soft deps serves this purpose.
					# In the case of multiple runtime cycles, where some cycles
					# may depend on smaller independent cycles, it's optimal
					# to merge smaller independent cycles before other cycles
					# that depend on them. Therefore, we search for the
					# smallest cycle in order to try and identify and prefer
					# these smaller independent cycles.
					ignore_priority = priority_range.ignore_medium_soft
					smallest_cycle = None
						if not mygraph.parent_nodes(node):
						selected_nodes = set()
						if gather_deps(ignore_priority,
							mergeable_nodes, selected_nodes, node):
							# When selecting asap_nodes, we need to ensure
							# that we haven't selected a large runtime cycle
							# that is obviously sub-optimal. This will be
							# obvious if any of the non-asap selected_nodes
							# is a leaf node when medium_soft deps are
							if prefer_asap and asap_nodes and \
								len(selected_nodes) > 1:
								for node in selected_nodes.difference(
									if not mygraph.child_nodes(node,
										DepPriorityNormalRange.ignore_medium_soft):
										selected_nodes = None
								if smallest_cycle is None or \
									len(selected_nodes) < len(smallest_cycle):
									smallest_cycle = selected_nodes

					selected_nodes = smallest_cycle

					if selected_nodes and debug:
						writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
							(len(selected_nodes),), noiselevel=-1)
						cycle_digraph = mygraph.copy()
						cycle_digraph.difference_update([x for x in
							cycle_digraph if x not in selected_nodes])
						cycle_digraph.debug_print()
						writemsg("\n", noiselevel=-1)

					if prefer_asap and asap_nodes and not selected_nodes:
						# We failed to find any asap nodes to merge, so ignore
						# them for the next iteration.

			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
							ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
								DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
						if child in asap_nodes:
						# Merge PDEPEND asap for bug #180045.
						asap_nodes.append(child)

			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

			if not selected_nodes and myblocker_uninstalls:
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.
					priority_range = DepPrioritySatisfiedRange
					priority_range = DepPriorityNormalRange

				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)

				min_parent_deps = None

				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).
					if task in ignored_uninstall_tasks:
					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.
					root_config = self._frozen_config.roots[task.root]
					inst_pkg = self._pkg(task.cpv, "installed", root_config,
					if self._dynamic_config.digraph.contains(inst_pkg):
					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						if not eapi_has_strong_blocks(blocker.eapi):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
					if forbid_overlap and running_root == task.root:
					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:
							if other_version is None:

						# For packages in the system set, don't take
						# any chances. If the conflict can't be resolved
						# by a normal replacement operation then abort.
							for atom in root_config.sets[
								"system"].iterAtomsForPackage(task):
						except portage.exception.InvalidDependString as e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)

					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.
					# For packages in the world set, go ahead an uninstall
					# when necessary, as long as the atom will be satisfied
					# in the final state.
					graph_db = self._dynamic_config.mydbapi[task.root]
						for atom in root_config.sets[
							"selected"].iterAtomsForPackage(task):
							for pkg in graph_db.match_pkgs(atom):
								self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					self._spinner_update()
					mergeable_parent = False
					parent_deps.add(task)
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if min_parent_deps is not None and \
							len(parent_deps) >= min_parent_deps:
							# This task is no better than a previously selected
							# task, so abort search now in order to avoid wasting
							# any more cpu time on this task. This increases
							# performance dramatically in cases when there are
							# hundreds of blockers to solve, like when
							# upgrading to a new slot of kde-meta.
							mergeable_parent = None
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
								mergeable_nodes, set(), parent):
							mergeable_parent = True
					if not mergeable_parent:
					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)
					if uninst_task is not None and min_parent_deps == 1:
						# This is the best possible result, so so abort search
						# now in order to avoid wasting any more cpu time.

				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)
					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)

					# Sometimes a merge node will render an uninstall
					# node unnecessary (due to occupying the same SLOT),
					# and we want to avoid executing a separate uninstall
					# task in that case.
					slot_node = self._dynamic_config.mydbapi[uninst_task.root
						].match_pkgs(uninst_task.slot_atom)
						slot_node[0].operation == "merge":
						mygraph.add(slot_node[0], uninst_task,
							priority=BlockerDepPriority.instance)

					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()

			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True

			if not selected_nodes and myblocker_uninstalls:
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				for node in myblocker_uninstalls.leaf_nodes():
						mygraph.remove(node)
						ignored_uninstall_tasks.add(node)

				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					drop_satisfied = False

			if not selected_nodes:
				self._dynamic_config._circular_deps_for_display = mygraph
				self._dynamic_config._skip_restart = True
				raise self._unknown_internal_error()

			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			drop_satisfied = False

			mygraph.difference_update(selected_nodes)

			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":

				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True
					vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
					inst_pkg = vardb.match_pkgs(node.slot_atom)
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						inst_pkg = inst_pkg[0]
						uninst_task = Package(built=inst_pkg.built,
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
							metadata=inst_pkg.metadata,
							operation="uninstall",
							root_config=inst_pkg.root_config,
							type_name=inst_pkg.type_name)
							mygraph.remove(uninst_task)

				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
								self._dynamic_config._unsolvable_blockers:
								solved_blockers.add(blocker)

				retlist.append(node)

				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(blocker)

		unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)

		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not unsolvable_blockers:
			self._dynamic_config.myparams["complete"] = True
			if '--debug' in self._frozen_config.myopts:
				msg.append("enabling 'complete' depgraph mode " + \
					"due to uninstall task(s):")
				for node in retlist:
					if isinstance(node, Package) and \
						node.operation == 'uninstall':
						msg.append("\t%s" % (node,))
				writemsg_level("\n%s\n" % \
					"".join("%s\n" % line for line in msg),
					level=logging.DEBUG, noiselevel=-1)
			raise self._serialize_tasks_retry("")

		# Set satisfied state on blockers, but not before the
		# above retry path, since we don't want to modify the
		# state in that case.
		for node in retlist:
			if isinstance(node, Blocker):
				node.satisfied = True

		for blocker in unsolvable_blockers:
			retlist.append(blocker)

		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			self._dynamic_config._skip_restart = True
			raise self._unknown_internal_error()

		if self._dynamic_config._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._dynamic_config._serialized_tasks_cache = retlist[:]
			self._dynamic_config._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		return retlist, scheduler_graph
	def _show_circular_deps(self, mygraph):
		"""
		Display the packages involved in a circular dependency, along with
		any suggestions produced by the circular_dependency_handler.

		@param mygraph: digraph containing the unresolved cycle(s)
		"""
		self._dynamic_config._circular_dependency_handler = \
			circular_dependency_handler(self, mygraph)
		handler = self._dynamic_config._circular_dependency_handler

		# Force a maximally informative merge-list display.
		self._frozen_config.myopts.pop("--quiet", None)
		self._frozen_config.myopts["--verbose"] = True
		self._frozen_config.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(handler.merge_list)
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
		portage.writemsg("\n", noiselevel=-1)

		if handler.circular_dep_message is None:
			handler.debug_print()
			portage.writemsg("\n", noiselevel=-1)

		if handler.circular_dep_message is not None:
			portage.writemsg(handler.circular_dep_message, noiselevel=-1)

		suggestions = handler.suggestions
			writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
			if len(suggestions) == 1:
				writemsg("by applying the following change:\n", noiselevel=-1)
				writemsg("by applying " + colorize("bold", "any of") + \
					" the following changes:\n", noiselevel=-1)
			writemsg("".join(suggestions), noiselevel=-1)
			writemsg("\nNote that this change can be reverted, once the package has" + \
				" been installed.\n", noiselevel=-1)
			if handler.large_cycle_count:
				writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
					"Several changes might be required to resolve all cycles.\n" + \
					"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
			writemsg("\n\n", noiselevel=-1)
			writemsg(prefix + "Note that circular dependencies " + \
				"can often be avoided by temporarily\n", noiselevel=-1)
			writemsg(prefix + "disabling USE flags that trigger " + \
				"optional dependencies.\n", noiselevel=-1)
5701 def _show_merge_list(self):
5702 if self._dynamic_config._serialized_tasks_cache is not None and \
5703 not (self._dynamic_config._displayed_list is not None and \
5704 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
5705 self._dynamic_config._displayed_list == \
5706 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
5707 display_list = self._dynamic_config._serialized_tasks_cache[:]
5708 if "--tree" in self._frozen_config.myopts:
5709 display_list.reverse()
5710 self.display(display_list)
	def _show_unsatisfied_blockers(self, blockers):
		"""
		Display unsatisfied blockers together with the packages that pulled
		the conflicting packages into the graph.

		@param blockers: iterable of unsatisfied Blocker instances
		"""
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		for line in textwrap.wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Display the conflicting packages along with the packages
		# that pulled them in. This is helpful for troubleshooting
		# cases in which blockers don't solve automatically and
		# the reasons are not apparent from the normal merge list
		for blocker in blockers:
			for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
				self._dynamic_config._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if not parent_atoms:
					atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
					if atom is not None:
						# Attribute the pull-in to the world ("selected") set.
						parent_atoms = set([("@selected", atom)])
					conflict_pkgs[pkg] = parent_atoms

			# Reduce noise by pruning packages that are only
			# pulled in by other conflict packages.
			for pkg, parent_atoms in conflict_pkgs.items():
				relevant_parent = False
				for parent, atom in parent_atoms:
					if parent not in conflict_pkgs:
						relevant_parent = True
				if not relevant_parent:
					pruned_pkgs.add(pkg)
			for pkg in pruned_pkgs:
				del conflict_pkgs[pkg]

			for pkg, parent_atoms in conflict_pkgs.items():
				# Prefer packages that are not directly involved in a conflict.
				# It can be essential to see all the packages here, so don't
				# omit any. If the list is long, people can simply use a pager.
				preferred_parents = set()
				for parent_atom in parent_atoms:
					parent, atom = parent_atom
					if parent not in conflict_pkgs:
						preferred_parents.add(parent_atom)

				ordered_list = list(preferred_parents)
				if len(parent_atoms) > len(ordered_list):
					for parent_atom in parent_atoms:
						if parent_atom not in preferred_parents:
							ordered_list.append(parent_atom)

				msg.append(indent + "%s pulled in by\n" % pkg)

				for parent_atom in ordered_list:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
						# Display the specific atom from SetArg or
						msg.append("%s required by %s" % (atom, parent))

			writemsg("".join(msg), noiselevel=-1)

		if "--quiet" not in self._frozen_config.myopts:
			show_blocker_docs_link()
	def display(self, mylist, favorites=[], verbosity=None):
		"""
		Display the given merge list via the module-level display() helper.

		@param mylist: list of tasks to display
		@param favorites: atoms to highlight as world-set favorites
			(NOTE(review): mutable default argument — appears safe since it
			is only passed through unmodified here, but confirm that the
			callee never mutates it before relying on that)
		@param verbosity: optional verbosity override passed through
		"""
		# This is used to prevent display_problems() from
		# redundantly displaying this exact same merge list
		# again via _show_merge_list().
		self._dynamic_config._displayed_list = mylist

		return display(self, mylist, favorites, verbosity)
	def _display_autounmask(self):
		"""
		Display --autounmask message and optionally write it to config files
		(using CONFIG_PROTECT). The message includes the comments and the changes.

		NOTE(review): this listing is elided -- several original lines
		(guards, else branches, loop headers) are missing from view, so
		some statements below appear without their enclosing conditionals.
		"""

		# Options controlling whether changes are written and how atoms
		# are formed (">=" unrestricted atoms vs exact "=" atoms).
		autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
		autounmask_unrestricted_atoms = \
			self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
		quiet = "--quiet" in self._frozen_config.myopts
		pretend = "--pretend" in self._frozen_config.myopts
		ask = "--ask" in self._frozen_config.myopts
		enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts

		def check_if_latest(pkg):
			# Returns an (is_latest, is_latest_in_slot) pair which decides
			# whether ">=" atoms (possibly slot-qualified) may be used
			# instead of exact "=" atoms in the generated config entries.
			is_latest_in_slot = True
			dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
			root_config = self._frozen_config.roots[pkg.root]

			for db, pkg_type, built, installed, db_keys in dbs:
				for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
					if other_pkg.cp != pkg.cp:
						# old-style PROVIDE virtual means there are no
						# normal matches for this pkg_type
					if other_pkg.slot_atom == pkg.slot_atom:
						is_latest_in_slot = False
					# iter_match_pkgs yields highest version first, so
					# there's no need to search this pkg_type any further
				if not is_latest_in_slot:
			return is_latest, is_latest_in_slot

		#Set of roots we have autounmask changes for.
		masked_by_missing_keywords = False
		unstable_keyword_msg = {}
		# Collect per-root ACCEPT_KEYWORDS changes for packages masked by
		# an unstable (or missing, "**") keyword.
		for pkg in self._dynamic_config._needed_unstable_keywords:
			self._show_merge_list()
			if pkg in self._dynamic_config.digraph:
				unstable_keyword_msg.setdefault(root, [])
				is_latest, is_latest_in_slot = check_if_latest(pkg)
				pkgsettings = self._frozen_config.pkgsettings[pkg.root]
				mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
					use=self._pkg_use_enabled(pkg))
				for reason in mreasons:
					if reason.unmask_hint and \
						reason.unmask_hint.key == 'unstable keyword':
						keyword = reason.unmask_hint.value
							masked_by_missing_keywords = True

						unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
						if autounmask_unrestricted_atoms:
								unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
							elif is_latest_in_slot:
								unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
								unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
							unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))

		p_mask_change_msg = {}
		# Collect per-root package.unmask changes, quoting the original
		# profile mask comment for context.
		for pkg in self._dynamic_config._needed_p_mask_changes:
			self._show_merge_list()
			if pkg in self._dynamic_config.digraph:
				p_mask_change_msg.setdefault(root, [])
				is_latest, is_latest_in_slot = check_if_latest(pkg)
				pkgsettings = self._frozen_config.pkgsettings[pkg.root]
				mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
					use=self._pkg_use_enabled(pkg))
				for reason in mreasons:
					if reason.unmask_hint and \
						reason.unmask_hint.key == 'p_mask':
						keyword = reason.unmask_hint.value

						comment, filename = portage.getmaskingreason(
							pkg.cpv, metadata=pkg.metadata,
							settings=pkgsettings,
							portdb=pkg.root_config.trees["porttree"].dbapi,
							return_location=True)

						p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
							p_mask_change_msg[root].append("# %s:\n" % filename)
							# Drop empty lines from the quoted mask comment.
							comment = [line for line in
								comment.splitlines() if line]
							for line in comment:
								p_mask_change_msg[root].append("%s\n" % line)
						if autounmask_unrestricted_atoms:
								p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
							elif is_latest_in_slot:
								p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
								p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
							p_mask_change_msg[root].append("=%s\n" % pkg.cpv)

		use_changes_msg = {}
		# Collect per-root package.use changes ("-flag" disables a flag).
		for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
			self._show_merge_list()
			if pkg in self._dynamic_config.digraph:
				use_changes_msg.setdefault(root, [])
				is_latest, is_latest_in_slot = check_if_latest(pkg)
				changes = needed_use_config_change[1]
				for flag, state in changes.items():
						adjustments.append(flag)
						adjustments.append("-" + flag)
				use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
					use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
				elif is_latest_in_slot:
					use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
					use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))

		# Collect per-root package.license changes.
		for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
			self._show_merge_list()
			if pkg in self._dynamic_config.digraph:
				license_msg.setdefault(root, [])
				is_latest, is_latest_in_slot = check_if_latest(pkg)

				license_msg[root].append(self._get_dep_chain_as_comment(pkg))
					license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
				elif is_latest_in_slot:
					license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
					license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))

		def find_config_file(abs_user_config, file_name):
			"""
			Searches /etc/portage for an appropriate file to append changes to.
			If the file_name is a file it is returned, if it is a directory, the
			last file in it is returned. Order of traversal is the identical to
			portage.util.grablines(recursive=True).

			file_name - String containing a file name like "package.use"
			return value - String. Absolute path of file to write to. None if
			no suitable file exists.
			"""
			file_path = os.path.join(abs_user_config, file_name)
			except OSError as e:
				if e.errno == errno.ENOENT:
					# The file doesn't exist, so we'll
				# Disk or file system trouble?
			last_file_path = None
					if stat.S_ISREG(st.st_mode):
					elif stat.S_ISDIR(st.st_mode):
						if os.path.basename(p) in _ignorecvs_dirs:
							contents = os.listdir(p)
							# Reverse-sorted traversal so that the last
							# file in sort order wins.
							contents.sort(reverse=True)
							for child in contents:
								# Skip hidden files and editor backups.
								if child.startswith(".") or \
									child.endswith("~"):
								stack.append(os.path.join(p, child))
			return last_file_path

		write_to_file = autounmask_write and not pretend
		#Make sure we have a file to write to before doing any write.
		file_to_write_to = {}
			settings = self._frozen_config.roots[root].settings
			abs_user_config = os.path.join(
				settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)

			if root in unstable_keyword_msg:
				# Prefer package.accept_keywords unless a legacy
				# package.keywords file already exists.
				if not os.path.exists(os.path.join(abs_user_config,
					"package.keywords")):
					filename = "package.accept_keywords"
					filename = "package.keywords"
				file_to_write_to[(abs_user_config, "package.keywords")] = \
					find_config_file(abs_user_config, filename)

			if root in p_mask_change_msg:
				file_to_write_to[(abs_user_config, "package.unmask")] = \
					find_config_file(abs_user_config, "package.unmask")

			if root in use_changes_msg:
				file_to_write_to[(abs_user_config, "package.use")] = \
					find_config_file(abs_user_config, "package.use")

			if root in license_msg:
				file_to_write_to[(abs_user_config, "package.license")] = \
					find_config_file(abs_user_config, "package.license")

		for (abs_user_config, f), path in file_to_write_to.items():
				problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))

		write_to_file = not problems

			settings = self._frozen_config.roots[root].settings
			abs_user_config = os.path.join(
				settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)

			writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)

			if root in unstable_keyword_msg:
				writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
					" are necessary to proceed:\n", noiselevel=-1)
				writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)

			if root in p_mask_change_msg:
				writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
					" are necessary to proceed:\n", noiselevel=-1)
				writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)

			if root in use_changes_msg:
				writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
					" are necessary to proceed:\n", noiselevel=-1)
				writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)

			if root in license_msg:
				writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
					" are necessary to proceed:\n", noiselevel=-1)
				writemsg_stdout("".join(license_msg[root]), noiselevel=-1)

			# Per-root CONFIG_PROTECT handling for the write phase.
			settings = self._frozen_config.roots[root].settings
			protect_obj[root] = ConfigProtect(settings["EROOT"], \
				shlex_split(settings.get("CONFIG_PROTECT", "")),
				shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))

		def write_changes(root, changes, file_to_write_to):
			# Append the generated entries to file_to_write_to, recording
			# failures in the enclosing scope's problems list.
			file_contents = None
				file_contents = io.open(
					_unicode_encode(file_to_write_to,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['content'],
					errors='replace').readlines()
			except IOError as e:
				if e.errno == errno.ENOENT:
					problems.append("!!! Failed to read '%s': %s\n" % \
						(file_to_write_to, e))
			if file_contents is not None:
				file_contents.extend(changes)
				if protect_obj[root].isprotected(file_to_write_to):
					# We want to force new_protect_filename to ensure
					# that the user will see all our changes via
					# etc-update, even if file_to_write_to doesn't
					# exist yet, so we specify force=True.
					file_to_write_to = new_protect_filename(file_to_write_to,
					write_atomic(file_to_write_to, "".join(file_contents))
				except PortageException:
					problems.append("!!! Failed to write '%s'\n" % file_to_write_to)

		if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
				"NOTE: The --autounmask-keep-masks option will prevent emerge",
				" from creating package.unmask or ** keyword changes."
					line = colorize("INFORM", line)
				writemsg_stdout(line + "\n", noiselevel=-1)

		if ask and write_to_file and file_to_write_to:
			prompt = "\nWould you like to add these " + \
				"changes to your config files?"
			if userquery(prompt, enter_invalid) == 'No':
				write_to_file = False

		if write_to_file and file_to_write_to:
				settings = self._frozen_config.roots[root].settings
				abs_user_config = os.path.join(
					settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
				ensure_dirs(abs_user_config)

				if root in unstable_keyword_msg:
					write_changes(root, unstable_keyword_msg[root],
						file_to_write_to.get((abs_user_config, "package.keywords")))

				if root in p_mask_change_msg:
					write_changes(root, p_mask_change_msg[root],
						file_to_write_to.get((abs_user_config, "package.unmask")))

				if root in use_changes_msg:
					write_changes(root, use_changes_msg[root],
						file_to_write_to.get((abs_user_config, "package.use")))

				if root in license_msg:
					write_changes(root, license_msg[root],
						file_to_write_to.get((abs_user_config, "package.license")))

			writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
			writemsg_stdout("".join(problems), noiselevel=-1)
		elif write_to_file and roots:
			writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
		elif not pretend and not autounmask_write and roots:
			writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		"""

		# Note that show_masked_packages() sends its output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		# NOTE(review): the try/finally lines that save and restore
		# sys.stdout are elided from this listing.
			sys.stdout = sys.stderr
			self._display_problems()

		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def _display_problems(self):
		# Report graph problems in decreasing order of specificity.
		# NOTE(review): this listing is elided -- some guards, breaks and
		# else branches are missing from view.
		if self._dynamic_config._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._dynamic_config._circular_deps_for_display)

		# The slot conflict display has better noise reduction than
		# the unsatisfied blockers display, so skip unsatisfied blockers
		# display if there are slot conflicts (see bug #385391).
		if self._dynamic_config._slot_collision_info:
			self._show_slot_collision_notice()
		elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._dynamic_config._unsatisfied_blockers_for_display)
			self._show_missed_update()

		self._show_ignored_binaries()

		self._display_autounmask()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._dynamic_config._missing_args:
			world_problems = False
			if "world" in self._dynamic_config.sets[
				self._frozen_config.target_root].sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
				for arg, atom in self._dynamic_config._missing_args:
					if arg.name in ("selected", "world") and atom in world_set:
						world_problems = True

				sys.stderr.write("\n!!! Problems have been " + \
					"detected with your world file\n")
				sys.stderr.write("!!! Please run " + \
					green("emaint --check world")+"\n\n")

		if self._dynamic_config._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._dynamic_config._missing_args) + "\n")

		if self._dynamic_config._pprovided_args:
			# Group package.provided arguments by (arg, atom) so each can
			# be reported with the sets that pulled it in.
			for arg, atom in self._dynamic_config._pprovided_args:
				if isinstance(arg, SetArg):
					arg_atom = (atom, atom)
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:

			msg.append(bad("\nWARNING: "))
			if len(self._dynamic_config._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.items():
					problems_sets.update(refs)
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))

			if "selected" in problems_sets or "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append(" B) Uninstall offending packages (cleans them from world).\n")
				msg.append(" C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		masked_packages = []
		for pkg in self._dynamic_config._masked_license_updates:
			root_config = pkg.root_config
			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.repo, pkg.metadata, mreasons))
			writemsg("\n" + colorize("BAD", "!!!") + \
				" The following updates are masked by LICENSE changes:\n",
			show_masked_packages(masked_packages)
			writemsg("\n", noiselevel=-1)

		masked_packages = []
		for pkg in self._dynamic_config._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
			# NOTE(review): unlike the LICENSE loop above, this passes the
			# bound method self._pkg_use_enabled itself rather than
			# self._pkg_use_enabled(pkg) -- looks like a bug; confirm
			# against get_masking_status()'s expected `use` argument.
			mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.repo, pkg.metadata, mreasons))
			writemsg("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n",
			show_masked_packages(masked_packages)
			writemsg("\n", noiselevel=-1)
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary.

		NOTE(review): this listing is elided -- continue/return/try lines
		and some assignments are missing from view."""
		# These modes must never modify the world file.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self._frozen_config.myopts:
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		world_set = root_config.sets["selected"]

		world_locked = False
		if hasattr(world_set, "lock"):

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._dynamic_config.sets[
			self._frozen_config.target_root].sets['__non_set_args__']
		portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
		added_favorites = set()
		# Only "nomerge" nodes for this root are candidates for world.
		for x in self._dynamic_config._set_nodes:
			if x.operation != "nomerge":
			if x.root != root_config.root:
				myfavkey = create_world_atom(x, args_set, root_config)
					if myfavkey in added_favorites:
					added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString as e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(x.cpv, e), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)

		# Record requested sets (except world/selected themselves and
		# sets that are not world candidates).
		for arg in self._dynamic_config._initial_arg_list:
			if not isinstance(arg, SetArg):
			if arg.root_config.root != root_config.root:
			if k in ("selected", "world") or \
				not root_config.sets[k].world_candidate:
				all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
				">>> Recording %s in \"world\" favorites file...\n" % \
				colorize("INFORM", str(a)), noiselevel=-1)
			world_set.update(all_added)
	# NOTE(review): this listing is elided -- the signature continuation
	# (skip_unsatisfied parameter), loop headers, try/else lines and
	# returns are missing from view.
	def _loadResumeCommand(self, resume_data, skip_masked=True,
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.
		"""

		if not isinstance(resume_data, dict):

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):

		favorites = resume_data.get("favorites")
		args_set = self._dynamic_config.sets[
			self._frozen_config.target_root].sets['__non_set_args__']
		if isinstance(favorites, list):
			args = self._load_favorites(favorites)

		fakedb = self._dynamic_config.mydbapi
		trees = self._frozen_config.trees
		serialized_tasks = []
			# Each serialized task is a [pkg_type, root, cpv, action] list.
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action != "merge":
			root_config = self._frozen_config.roots[myroot]

			# Use the resume "favorites" list to see if a repo was specified
			depgraph_sets = self._dynamic_config.sets[root_config.root]
			for atom in depgraph_sets.atoms.getAtoms():
				if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):

			atom = "=" + pkg_key
				atom = atom + _repo_separator + repo

				atom = Atom(atom, allow_repo=True)

			# Select the first visible, non-excluded match for the atom.
			for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
				if not self._pkg_visibility_check(pkg) or \
					self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):

				# It does not exist or it is corrupt.
				# TODO: log these somewhere
				raise portage.exception.PackageNotFound(pkg_key)

			if "merge" == pkg.operation and \
				self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
				modified_use=self._pkg_use_enabled(pkg)):

			if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
					self._dynamic_config._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self._spinner_update()

		if self._dynamic_config._unsatisfied_deps_for_display:

		if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
			self._dynamic_config._serialized_tasks_cache = serialized_tasks
			self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
			self._select_package = self._select_pkg_from_graph
			self._dynamic_config.myparams["selective"] = True
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of it's dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep depenedencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self._dynamic_config.myparams["deep"] = True

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in self._expand_set_args(args, add_to_digraph=True):
				for atom in arg.pset.getAtoms():
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			if not self._create_graph(allow_unsatisfied=True):

			unsatisfied_deps = []
			for dep in self._dynamic_config._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
					node = dep_stack.pop()
					if not isinstance(node, Package):
					if node.operation == "merge":
						unsatisfied_install = True
					if node in traversed:
					dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._dynamic_config._serialized_tasks_cache = None
			except self._unknown_internal_error:
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.

		NOTE(review): this listing is elided -- continue statements,
		else/try lines and the return are missing from view.
		"""
		root_config = self._frozen_config.roots[self._frozen_config.target_root]
		sets = root_config.sets
		depgraph_sets = self._dynamic_config.sets[root_config.root]
			# Only string favorites are meaningful here.
			if not isinstance(x, basestring):
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				if s in depgraph_sets.sets:
				depgraph_sets.sets[s] = pset
				args.append(SetArg(arg=x, pset=pset,
					root_config=root_config))
				# Anything that is not a set reference is parsed as an atom.
					x = Atom(x, allow_repo=True)
				except portage.exception.InvalidAtom:
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		self._set_args(args)
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so that handlers can
			# inspect the unsatisfied dependencies (see _loadResumeCommand).
			self.depgraph = depgraph
6617 class _internal_exception(portage.exception.PortageException):
6618 def __init__(self, value=""):
6619 portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		may not be known.
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
	class _backtrack_mask(_internal_exception):
		"""
		This is raised by _show_unsatisfied_dep() when it's called with
		check_backtrack=True and a matching package has been masked by
		backtracking.
		"""
	class _autounmask_breakage(_internal_exception):
		"""
		This is raised by _show_unsatisfied_dep() when it's called with
		check_autounmask_breakage=True and a matching package has
		been disqualified due to autounmask changes.
		"""
6651 def need_restart(self):
6652 return self._dynamic_config._need_restart and \
6653 not self._dynamic_config._skip_restart
6655 def success_without_autounmask(self):
6656 return self._dynamic_config._success_without_autounmask
	def autounmask_breakage_detected(self):
		# Re-run the recorded unsatisfied-dep displays in
		# check_autounmask_breakage mode; _show_unsatisfied_dep raises
		# self._autounmask_breakage when autounmask changes caused the
		# breakage (see the _autounmask_breakage class).
		# NOTE(review): the try: and return lines are elided from this
		# listing.
			for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
				self._show_unsatisfied_dep(
					*pargs, check_autounmask_breakage=True, **kwargs)
			except self._autounmask_breakage:
6667 def get_backtrack_infos(self):
6668 return self._dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	via dep_check().

	NOTE(review): this listing is elided -- some assignments, guards,
	loop headers and returns are missing from view.
	"""
	def __init__(self, depgraph, root):
		dbapi.__init__(self)
		self._depgraph = depgraph
		# NOTE(review): the self._root assignment is elided from this
		# listing; methods below read self._root.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Invalidate cached match() results and the cpv->Package map.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def cp_list(self, cp):
		"""
		Emulate cp_list just so it can be used to check for existence
		of new-style virtuals. Since it's a waste of time to return
		more than one cpv for this use case, a maximum of one cpv will
		be returned.
		"""
		if isinstance(cp, Atom):
		for pkg in self._depgraph._iter_match_pkgs_any(
			self._depgraph._frozen_config.roots[self._root], atom):

	def match(self, atom):
		# Cached package selection for dep_check(); results are cpv lists.
		ret = self._match_cache.get(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)

		if pkg is not None and self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg

		if pkg is not None and \
			atom.slot is None and \
			pkg.cp.startswith("virtual/") and \
			(("remove" not in self._depgraph._dynamic_config.myparams and
			"--update" not in self._depgraph._frozen_config.myopts) or
			not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
			# For new-style virtual lookahead that occurs inside dep_check()
			# for bug #141118, examine all slots. This is needed so that newer
			# slots will not unnecessarily be pulled in when a satisfying lower
			# slot is already installed. For example, if virtual/jdk-1.5 is
			# satisfied via gcj-jdk then there's no need to pull in a newer
			# slot to satisfy a virtual/jdk dependency, unless --update is
			for virt_pkg in self._depgraph._iter_match_pkgs_any(
				self._depgraph._frozen_config.roots[self._root], atom):
				if virt_pkg.cp != pkg.cp:
				slots.add(virt_pkg.slot)

			# The slot of the already-selected pkg is handled above.
			slots.remove(pkg.slot)
				slot_atom = atom.with_slot(slots.pop())
				pkg, existing = self._depgraph._select_package(
					self._root, slot_atom)
				if not self._visible(pkg):
				self._cpv_pkg_map[pkg.cpv] = pkg

			self._cpv_sort_ascending(ret)

		self._match_cache[atom] = ret

	def _visible(self, pkg):
		# Decide whether pkg may be offered to dep_check() at all.
		if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
			arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
			except (StopIteration, portage.exception.InvalidDependString):

		if pkg.installed and \
			(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
			# Account for packages with masks (like KEYWORDS masks)
			# that are usually ignored in visibility checks for
			# installed packages, in order to handle cases like
			myopts = self._depgraph._frozen_config.myopts
			use_ebuild_visibility = myopts.get(
				'--use-ebuild-visibility', 'n') != 'n'
			avoid_update = "--update" not in myopts and \
				"remove" not in self._depgraph._dynamic_config.myparams
			usepkgonly = "--usepkgonly" in myopts
			if not avoid_update:
				if not use_ebuild_visibility and usepkgonly:
						pkg_eb = self._depgraph._pkg(
							pkg.cpv, "ebuild", pkg.root_config,
					except portage.exception.PackageNotFound:
						pkg_eb_visible = False
						for pkg_eb in self._depgraph._iter_match_pkgs(
							pkg.root_config, "ebuild",
							Atom("=%s" % (pkg.cpv,))):
							if self._depgraph._pkg_visibility_check(pkg_eb):
								pkg_eb_visible = True
						if not pkg_eb_visible:
						if not self._depgraph._pkg_visibility_check(pkg_eb):

		in_graph = self._depgraph._dynamic_config._slot_pkg_map[
			self._root].get(pkg.slot_atom)
		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			# Note: highest_visible is not necessarily the real highest
			# visible, especially when --update is not enabled, so use
			# < operator instead of !=.
			if pkg < highest_visible:
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.

	def aux_get(self, cpv, wants):
		# Metadata comes from Package instances cached by match().
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	# Tell the user that the short ebuild name `arg` matched packages in
	# several categories: a plain list in --quiet mode, otherwise via the
	# search UI so descriptions are shown.
	# NOTE(review): this listing is elided -- some statements (e.g. the
	# search invocation details) are missing from view.
	if "--quiet" in myopts:
		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def _spinner_start(spinner, myopts):
	"""Print the pretend/ask preamble and start the dependency spinner.

	NOTE(review): this copy appears to be missing several lines — the
	'action' variable is read below but its assignments (and some
	guard/else branches) are not present. Verify against upstream.
	"""
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:

		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			if "--unordered-display" in myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s:" % action) + "\n\n")
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s, in reverse order:" % action) + "\n\n")
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be %s, in order:" % action) + "\n\n")

	# Silence the spinner when output is quiet or no dep calculation runs.
	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		spinner.update = spinner.update_quiet

	portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
	"""Terminate the spinner output started by _spinner_start().

	NOTE(review): an early 'return' for the quiet case appears to have
	been dropped from this copy — confirm against upstream.
	"""
	if spinner is None or \
		spinner.update == spinner.update_quiet:

	if spinner.update != spinner.update_basic:
		# update_basic is used for non-tty output,
		# so don't output backspaces in that case.
		portage.writemsg_stdout("\b\b")

	portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# NOTE(review): the try/finally pairing _spinner_start() with
	# _spinner_stop() appears to have been lost in this copy; as written
	# the _spinner_stop() call is unreachable after the return. Verify
	# against upstream.
	_spinner_start(spinner, myopts)
	return _backtrack_depgraph(settings, trees, myopts, myparams,
		myaction, myfiles, spinner)
	_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
	"""Backtracking driver for depgraph construction.

	Repeatedly builds a depgraph, feeding failure information back into
	a Backtracker until the calculation succeeds, backtracking is not
	allowed/needed, or the retry limit is reached.

	@returns: (success, mydepgraph, favorites)
	"""
	debug = "--debug" in myopts
	mydepgraph = None
	max_retries = myopts.get('--backtrack', 10)
	# Use floor division so max_depth stays an int under Python 3,
	# where plain '/' is true division and would yield a float.
	max_depth = max(1, (max_retries + 1) // 2)
	allow_backtracking = max_retries > 0
	backtracker = Backtracker(max_depth)
	backtracked = 0

	frozen_config = _frozen_depgraph_config(settings, trees,
		myopts, spinner)

	while backtracker:

		if debug and mydepgraph is not None:
			writemsg_level(
				"\n\nbacktracking try %s \n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		backtrack_parameters = backtracker.get()

		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=allow_backtracking,
			backtrack_parameters=backtrack_parameters)
		success, favorites = mydepgraph.select_files(myfiles)

		if success or mydepgraph.success_without_autounmask():
			break
		elif not allow_backtracking:
			break
		elif backtracked >= max_retries:
			break
		elif mydepgraph.need_restart():
			backtracked += 1
			backtracker.feedback(mydepgraph.get_backtrack_infos())
		else:
			break

	if not (success or mydepgraph.success_without_autounmask()) and backtracked:

		if debug:
			writemsg_level(
				"\n\nbacktracking aborted after %s tries\n\n" % \
				backtracked, noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()

		# Replay the best run found, so the final (failed) graph is the
		# most informative one for display_problems().
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config,
			allow_backtracking=False,
			backtrack_parameters=backtracker.get_best_run())
		success, favorites = mydepgraph.select_files(myfiles)

	if not success and mydepgraph.autounmask_breakage_detected():
		if debug:
			writemsg_level(
				"\n\nautounmask breakage detected\n\n",
				noiselevel=-1, level=logging.DEBUG)
			mydepgraph.display_problems()
		# Autounmask changes themselves caused breakage; recalculate
		# with autounmask disabled.
		myopts["--autounmask"] = "n"
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
			frozen_config=frozen_config, allow_backtracking=False)
		success, favorites = mydepgraph.select_files(myfiles)

	return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.
	"""
	# NOTE(review): the try/finally wrapper and the continuation line of
	# the _resume_depgraph() call appear to be missing in this copy —
	# verify against upstream before editing.
	_spinner_start(spinner, myopts)
	return _resume_depgraph(settings, trees, mtimedb, myopts,
	_spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	TODO: Return reasons for dropped_tasks, for display/logging.
	@returns: (success, depgraph, dropped_tasks)
	"""
	# NOTE(review): several lines appear to be missing from this copy
	# (e.g. skip_masked initialization, the retry loop header, the
	# 'raise' in the non-skip branch, 'continue' statements and the
	# mergelist iteration header). Verify against upstream before
	# editing this block.
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	frozen_config = _frozen_depgraph_config(settings, trees,
	mydepgraph = depgraph(settings, trees,
		myopts, myparams, spinner, frozen_config=frozen_config)
	success = mydepgraph._loadResumeCommand(mtimedb["resume"],
		skip_masked=skip_masked)
	except depgraph.UnsatisfiedResumeDep as e:
		if not skip_unsatisfied:
		# Collect the parents whose dependencies are unsatisfied, then
		# transitively propagate that to their own merge parents.
		graph = mydepgraph._dynamic_config.digraph
		unsatisfied_parents = dict((dep.parent, dep.parent) \
		traversed_nodes = set()
		unsatisfied_stack = list(unsatisfied_parents)
		while unsatisfied_stack:
			pkg = unsatisfied_stack.pop()
			if pkg in traversed_nodes:
			traversed_nodes.add(pkg)

			# If this package was pulled in by a parent
			# package scheduled for merge, removing this
			# package may cause the parent package's
			# dependency to become unsatisfied.
			for parent_node in graph.parent_nodes(pkg):
				if not isinstance(parent_node, Package) \
					or parent_node.operation not in ("merge", "nomerge"):
				# We need to traverse all priorities here, in order to
				# ensure that a package with an unsatisfied dependency
				# won't get pulled in, even indirectly via a soft
				unsatisfied_parents[parent_node] = parent_node
				unsatisfied_stack.append(parent_node)

		unsatisfied_tuples = frozenset(tuple(parent_node)
			for parent_node in unsatisfied_parents
			if isinstance(parent_node, Package))
		pruned_mergelist = []
			if isinstance(x, list) and \
				tuple(x) not in unsatisfied_tuples:
				pruned_mergelist.append(x)

		# If the mergelist doesn't shrink then this loop is infinite.
		if len(pruned_mergelist) == len(mergelist):
			# This happens if a package can't be dropped because
			# it's already installed, but it has unsatisfied PDEPEND.
		mergelist[:] = pruned_mergelist

		# Exclude installed packages that have been removed from the graph due
		# to failure to build/install runtime dependencies after the dependent
		# package has already been installed.
		dropped_tasks.update(pkg for pkg in \
			unsatisfied_parents if pkg.operation != "nomerge")

		del e, graph, traversed_nodes, \
			unsatisfied_parents, unsatisfied_stack

	return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
	"""Collect metadata and mask reasons for a single package version.

	@returns: (metadata, mreasons) — metadata is the aux_get key/value
	dict (None is treated as corruption below), mreasons a list of
	human-readable mask-reason strings.

	NOTE(review): the try/except around db.aux_get() and several
	else branches appear to be missing from this copy — verify
	against upstream before editing.
	"""
	metadata = dict(zip(db_keys,
		db.aux_get(cpv, db_keys, myrepo=myrepo)))
	if metadata is None:
		mreasons = ["corruption"]
	eapi = metadata['EAPI']
	if not portage.eapi_is_supported(eapi):
		mreasons = ['EAPI %s' % eapi]
	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	# Prefer the caller-computed effective USE when available.
	if _pkg_use_enabled is not None:
		modified_use = _pkg_use_enabled(pkg)
	mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print each masked package with its mask reasons, package.mask
	comment and license file locations, de-duplicating repeated output.

	@returns: True if any package was masked due to an unsupported EAPI.

	NOTE(review): this copy appears to be missing lines — e.g. the
	shown_cpvs initialization, the output_cpv assignment, 'continue'
	statements and a 'try:' before the except clause. Verify against
	upstream before editing.
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv, repo,
		metadata, mreasons) in masked_packages:
		output_cpv += _repo_separator + repo
		if output_cpv in shown_cpvs:
		shown_cpvs.add(output_cpv)
		eapi_masked = metadata is not None and \
			not portage.eapi_is_supported(metadata["EAPI"])
			have_eapi_mask = True
			# When masked by EAPI, metadata is mostly useless since
			# it doesn't contain essential things like SLOT.
		comment, filename = None, None
		if not eapi_masked and \
			"package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not eapi_masked and metadata is not None:
			missing_licenses = \
				pkgsettings._getMissingLicenses(
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
		writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
		if comment and comment not in shown_comments:
			writemsg_stdout(filename + ":\n" + comment + "\n",
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.\n\n") % (l, l_path)
			writemsg_stdout(msg, noiselevel=-1)
			shown_licenses.add(l)
	return have_eapi_mask
def show_mask_docs():
	"""Tell the user where package masking is documented."""
	for line in (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	):
		writemsg_stdout(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Point the user at the Gentoo Handbook section on blocked packages."""
	messages = (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for message in messages:
		writemsg(message, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return human-readable mask reasons for pkg as a list of strings."""
	statuses = _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use)
	return [mask_reason.message for mask_reason in statuses]
7171 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7172 mreasons = _getmaskingstatus(
7173 pkg, settings=pkgsettings,
7174 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7176 if not pkg.installed:
7177 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
7178 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7179 pkg.metadata["CHOST"]))
7182 for msg_type, msgs in pkg.invalid.items():
7185 _MaskReason("invalid", "invalid: %s" % (msg,)))
7187 if not pkg.metadata["SLOT"]:
7189 _MaskReason("invalid", "SLOT: undefined"))