1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
15 from collections import deque
16 from itertools import chain
19 from portage import os, OrderedDict
20 from portage import _unicode_decode, _unicode_encode, _encodings
21 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
22 from portage.dbapi import dbapi
23 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
24 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
25 from portage.exception import InvalidAtom, InvalidDependString, PortageException
26 from portage.output import colorize, create_color_func, \
28 bad = create_color_func("BAD")
29 from portage.package.ebuild.getmaskingstatus import \
30 _getmaskingstatus, _MaskReason
31 from portage._sets import SETPREFIX
32 from portage._sets.base import InternalPackageSet
33 from portage.util import ConfigProtect, shlex_split, new_protect_filename
34 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
35 from portage.util import writemsg_level, write_atomic
36 from portage.util.digraph import digraph
37 from portage.util.listdir import _ignorecvs_dirs
38 from portage.versions import catpkgsplit
40 from _emerge.AtomArg import AtomArg
41 from _emerge.Blocker import Blocker
42 from _emerge.BlockerCache import BlockerCache
43 from _emerge.BlockerDepPriority import BlockerDepPriority
44 from _emerge.countdown import countdown
45 from _emerge.create_world_atom import create_world_atom
46 from _emerge.Dependency import Dependency
47 from _emerge.DependencyArg import DependencyArg
48 from _emerge.DepPriority import DepPriority
49 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
50 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
51 from _emerge.FakeVartree import FakeVartree
52 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
53 from _emerge.is_valid_package_atom import is_valid_package_atom
54 from _emerge.Package import Package
55 from _emerge.PackageArg import PackageArg
56 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
57 from _emerge.RootConfig import RootConfig
58 from _emerge.search import search
59 from _emerge.SetArg import SetArg
60 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
61 from _emerge.UnmergeDepPriority import UnmergeDepPriority
62 from _emerge.UseFlagDisplay import pkg_use_display
63 from _emerge.userquery import userquery
65 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
66 from _emerge.resolver.slot_collision import slot_conflict_handler
67 from _emerge.resolver.circular_dependency import circular_dependency_handler
68 from _emerge.resolver.output import Display
70 if sys.hexversion >= 0x3000000:
74 class _scheduler_graph_config(object):
75 def __init__(self, trees, pkg_cache, graph, mergelist):
77 self.pkg_cache = pkg_cache
79 self.mergelist = mergelist
81 def _wildcard_set(atoms):
82 pkgs = InternalPackageSet(allow_wildcard=True)
85 x = Atom(x, allow_wildcard=True)
86 except portage.exception.InvalidAtom:
87 x = Atom("*/" + x, allow_wildcard=True)
91 class _frozen_depgraph_config(object):
93 def __init__(self, settings, trees, myopts, spinner):
94 self.settings = settings
95 self.target_root = settings["ROOT"]
98 if settings.get("PORTAGE_DEBUG", "") == "1":
100 self.spinner = spinner
101 self._running_root = trees["/"]["root_config"]
102 self._opts_no_restart = frozenset(["--buildpkgonly",
103 "--fetchonly", "--fetch-all-uri", "--pretend"])
104 self.pkgsettings = {}
106 self._trees_orig = trees
108 # All Package instances
110 self._highest_license_masked = {}
112 self.trees[myroot] = {}
113 # Create a RootConfig instance that references
114 # the FakeVartree instead of the real one.
115 self.roots[myroot] = RootConfig(
116 trees[myroot]["vartree"].settings,
118 trees[myroot]["root_config"].setconfig)
119 for tree in ("porttree", "bintree"):
120 self.trees[myroot][tree] = trees[myroot][tree]
121 self.trees[myroot]["vartree"] = \
122 FakeVartree(trees[myroot]["root_config"],
123 pkg_cache=self._pkg_cache,
124 pkg_root_config=self.roots[myroot])
125 self.pkgsettings[myroot] = portage.config(
126 clone=self.trees[myroot]["vartree"].settings)
128 self._required_set_names = set(["world"])
130 atoms = ' '.join(myopts.get("--exclude", [])).split()
131 self.excluded_pkgs = _wildcard_set(atoms)
132 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
133 self.reinstall_atoms = _wildcard_set(atoms)
134 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
135 self.usepkg_exclude = _wildcard_set(atoms)
136 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
137 self.useoldpkg_atoms = _wildcard_set(atoms)
138 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
139 self.rebuild_exclude = _wildcard_set(atoms)
140 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
141 self.rebuild_ignore = _wildcard_set(atoms)
143 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
144 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
145 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
147 class _depgraph_sets(object):
149 # contains all sets added to the graph
151 # contains non-set atoms given as arguments
152 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
153 # contains all atoms from all sets added to the graph, including
154 # atoms given as arguments
155 self.atoms = InternalPackageSet(allow_repo=True)
156 self.atom_arg_map = {}
158 class _rebuild_config(object):
	def __init__(self, frozen_config, backtrack_parameters):
		"""Capture the --rebuild-* configuration and backtracking state.

		@param frozen_config: immutable depgraph configuration carrying
			the rebuild_if_new_rev / rebuild_if_new_ver /
			rebuild_if_unbuilt option flags
		@param backtrack_parameters: BacktrackParameter instance carrying
			the rebuild_list / reinstall_list accumulated by previous
			backtracking runs
		"""
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copy the backtracker's sets so local mutations do not leak
		# back into the shared BacktrackParameter instance.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True if any rebuild trigger is enabled at all; used to
		# short-circuit add() when no --rebuild-if-* option is active.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)
	def add(self, dep_pkg, dep):
		"""Record a potential rebuild edge from dep_pkg to dep's parent.

		Only edges that could actually trigger a rebuild are stored: a
		rebuild mode must be enabled, the parent must be a built Package
		with a buildtime or runtime dependency on dep_pkg, dep_pkg must
		itself be a Package, and neither end may be matched by
		--rebuild-exclude / --rebuild-ignore atoms.
		"""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and (priority.buildtime or priority.runtime) and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)
183 def _needs_rebuild(self, dep_pkg):
184 """Check whether packages that depend on dep_pkg need to be rebuilt."""
185 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
186 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
189 if self.rebuild_if_unbuilt:
190 # dep_pkg is being installed from source, so binary
191 # packages for parents are invalid. Force rebuild
194 trees = self._frozen_config.trees
195 vardb = trees[dep_pkg.root]["vartree"].dbapi
196 if self.rebuild_if_new_rev:
197 # Parent packages are valid if a package with the same
198 # cpv is already installed.
199 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
201 # Otherwise, parent packages are valid if a package with the same
202 # version (excluding revision) is already installed.
203 assert self.rebuild_if_new_ver
204 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
205 for inst_cpv in vardb.match(dep_pkg.slot_atom):
206 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
207 if inst_cpv_norev == cpv_norev:
212 def _trigger_rebuild(self, parent, build_deps, runtime_deps):
213 root_slot = (parent.root, parent.slot_atom)
214 if root_slot in self.rebuild_list:
216 trees = self._frozen_config.trees
217 children = set(build_deps).intersection(runtime_deps)
219 for slot_atom in children:
220 kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
222 dep_root_slot = (dep_pkg.root, slot_atom)
223 if self._needs_rebuild(dep_pkg):
224 self.rebuild_list.add(root_slot)
226 elif ("--usepkg" in self._frozen_config.myopts and
227 (dep_root_slot in self.reinstall_list or
228 dep_root_slot in self.rebuild_list or
229 not dep_pkg.installed)):
231 # A direct rebuild dependency is being installed. We
232 # should update the parent as well to the latest binary,
233 # if that binary is valid.
235 # To validate the binary, we check whether all of the
236 # rebuild dependencies are present on the same binhost.
238 # 1) If parent is present on the binhost, but one of its
239 # rebuild dependencies is not, then the parent should
240 # be rebuilt from source.
241 # 2) Otherwise, the parent binary is assumed to be valid,
242 # because all of its rebuild dependencies are
244 bintree = trees[parent.root]["bintree"]
245 uri = bintree.get_pkgindex_uri(parent.cpv)
246 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
247 bindb = bintree.dbapi
248 if self.rebuild_if_new_ver and uri and uri != dep_uri:
249 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
250 for cpv in bindb.match(dep_pkg.slot_atom):
251 if cpv_norev == catpkgsplit(cpv)[:-1]:
252 dep_uri = bintree.get_pkgindex_uri(cpv)
255 if uri and uri != dep_uri:
256 # 1) Remote binary package is invalid because it was
257 # built without dep_pkg. Force rebuild.
258 self.rebuild_list.add(root_slot)
260 elif (parent.installed and
261 root_slot not in self.reinstall_list):
262 inst_build_time = parent.metadata.get("BUILD_TIME")
264 bin_build_time, = bindb.aux_get(parent.cpv,
268 if bin_build_time != inst_build_time:
269 # 2) Remote binary package is valid, and local package
270 # is not up to date. Force reinstall.
273 self.reinstall_list.add(root_slot)
276 def trigger_rebuilds(self):
278 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
279 depends on pkgA at both build-time and run-time, pkgB needs to be
286 leaf_nodes = deque(graph.leaf_nodes())
		def ignore_non_runtime(priority):
			# Edge filter: True for priorities without a runtime component.
			return not priority.runtime

		def ignore_non_buildtime(priority):
			# Edge filter: True for priorities without a buildtime component.
			return not priority.buildtime
294 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
295 # will always know which children are being rebuilt.
296 while not graph.empty():
298 # We're interested in intersection of buildtime and runtime,
299 # so ignore edges that do not contain both.
300 leaf_nodes.extend(graph.leaf_nodes(
301 ignore_priority=ignore_non_runtime))
303 leaf_nodes.extend(graph.leaf_nodes(
304 ignore_priority=ignore_non_buildtime))
306 # We'll have to drop an edge that is both
307 # buildtime and runtime. This should be
309 leaf_nodes.append(graph.order[-1])
311 node = leaf_nodes.popleft()
312 if node not in graph:
313 # This can be triggered by circular dependencies.
315 slot_atom = node.slot_atom
317 # Remove our leaf node from the graph, keeping track of deps.
318 parents = graph.nodes[node][1].items()
320 node_build_deps = build_deps.get(node, {})
321 node_runtime_deps = runtime_deps.get(node, {})
322 for parent, priorities in parents:
324 # Ignore a direct cycle.
326 parent_bdeps = build_deps.setdefault(parent, {})
327 parent_rdeps = runtime_deps.setdefault(parent, {})
328 for priority in priorities:
329 if priority.buildtime:
330 parent_bdeps[slot_atom] = node
332 parent_rdeps[slot_atom] = node
333 if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
334 parent_rdeps.update(node_runtime_deps)
335 if not graph.child_nodes(parent):
336 leaf_nodes.append(parent)
338 # Trigger rebuilds for our leaf node. Because all of our children
339 # have been processed, build_deps and runtime_deps will be
340 # completely filled in, and self.rebuild_list / self.reinstall_list
341 # will tell us whether any of our children need to be rebuilt or
343 if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
349 class _dynamic_depgraph_config(object):
351 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
352 self.myparams = myparams.copy()
353 self._vdb_loaded = False
354 self._allow_backtracking = allow_backtracking
355 # Maps slot atom to package for each Package added to the graph.
356 self._slot_pkg_map = {}
357 # Maps nodes to the reasons they were selected for reinstallation.
358 self._reinstall_nodes = {}
360 # Contains a filtered view of preferred packages that are selected
361 # from available repositories.
362 self._filtered_trees = {}
363 # Contains installed packages and new packages that have been added
365 self._graph_trees = {}
366 # Caches visible packages returned from _select_package, for use in
367 # depgraph._iter_atoms_for_pkg() SLOT logic.
368 self._visible_pkgs = {}
369 #contains the args created by select_files
370 self._initial_arg_list = []
371 self.digraph = portage.digraph()
372 # manages sets added to the graph
374 # contains all nodes pulled in by self.sets
375 self._set_nodes = set()
376 # Contains only Blocker -> Uninstall edges
377 self._blocker_uninstalls = digraph()
378 # Contains only Package -> Blocker edges
379 self._blocker_parents = digraph()
380 # Contains only irrelevant Package -> Blocker edges
381 self._irrelevant_blockers = digraph()
382 # Contains only unsolvable Package -> Blocker edges
383 self._unsolvable_blockers = digraph()
384 # Contains all Blocker -> Blocked Package edges
385 self._blocked_pkgs = digraph()
386 # Contains world packages that have been protected from
387 # uninstallation but may not have been added to the graph
388 # if the graph is not complete yet.
389 self._blocked_world_pkgs = {}
390 # Contains packages whose dependencies have been traversed.
391 # This use used to check if we have accounted for blockers
392 # relevant to a package.
393 self._traversed_pkg_deps = set()
394 self._slot_collision_info = {}
395 # Slot collision nodes are not allowed to block other packages since
396 # blocker validation is only able to account for one package per slot.
397 self._slot_collision_nodes = set()
398 self._parent_atoms = {}
399 self._slot_conflict_parent_atoms = set()
400 self._slot_conflict_handler = None
401 self._circular_dependency_handler = None
402 self._serialized_tasks_cache = None
403 self._scheduler_graph = None
404 self._displayed_list = None
405 self._pprovided_args = []
406 self._missing_args = []
407 self._masked_installed = set()
408 self._masked_license_updates = set()
409 self._unsatisfied_deps_for_display = []
410 self._unsatisfied_blockers_for_display = None
411 self._circular_deps_for_display = None
413 self._dep_disjunctive_stack = []
414 self._unsatisfied_deps = []
415 self._initially_unsatisfied_deps = []
416 self._ignored_deps = []
417 self._highest_pkg_cache = {}
419 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
420 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
421 self._needed_license_changes = backtrack_parameters.needed_license_changes
422 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
423 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
424 self._need_restart = False
425 # For conditions that always require user intervention, such as
426 # unsatisfied REQUIRED_USE (currently has no autounmask support).
427 self._skip_restart = False
428 self._backtrack_infos = {}
430 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
431 self._success_without_autounmask = False
432 self._traverse_ignored_deps = False
434 for myroot in depgraph._frozen_config.trees:
435 self.sets[myroot] = _depgraph_sets()
436 self._slot_pkg_map[myroot] = {}
437 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
438 # This dbapi instance will model the state that the vdb will
439 # have after new packages have been installed.
440 fakedb = PackageVirtualDbapi(vardb.settings)
442 self.mydbapi[myroot] = fakedb
445 graph_tree.dbapi = fakedb
446 self._graph_trees[myroot] = {}
447 self._filtered_trees[myroot] = {}
448 # Substitute the graph tree for the vartree in dep_check() since we
449 # want atom selections to be consistent with package selections
450 # have already been made.
451 self._graph_trees[myroot]["porttree"] = graph_tree
452 self._graph_trees[myroot]["vartree"] = graph_tree
455 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
456 self._filtered_trees[myroot]["porttree"] = filtered_tree
457 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
459 # Passing in graph_tree as the vartree here could lead to better
460 # atom selections in some cases by causing atoms for packages that
461 # have been added to the graph to be preferred over other choices.
462 # However, it can trigger atom selections that result in
463 # unresolvable direct circular dependencies. For example, this
464 # happens with gwydion-dylan which depends on either itself or
465 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
466 # gwydion-dylan-bin needs to be selected in order to avoid a
467 # an unresolvable direct circular dependency.
469 # To solve the problem described above, pass in "graph_db" so that
470 # packages that have been added to the graph are distinguishable
471 # from other available packages and installed packages. Also, pass
472 # the parent package into self._select_atoms() calls so that
473 # unresolvable direct circular dependencies can be detected and
474 # avoided when possible.
475 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
476 self._filtered_trees[myroot]["vartree"] = \
477 depgraph._frozen_config.trees[myroot]["vartree"]
480 # (db, pkg_type, built, installed, db_keys)
481 if "remove" in self.myparams:
482 # For removal operations, use _dep_check_composite_db
483 # for availability and visibility checks. This provides
484 # consistency with install operations, so we don't
485 # get install/uninstall cycles like in bug #332719.
486 self._graph_trees[myroot]["porttree"] = filtered_tree
488 if "--usepkgonly" not in depgraph._frozen_config.myopts:
489 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
490 db_keys = list(portdb._aux_cache_keys)
491 dbs.append((portdb, "ebuild", False, False, db_keys))
493 if "--usepkg" in depgraph._frozen_config.myopts:
494 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
495 db_keys = list(bindb._aux_cache_keys)
496 dbs.append((bindb, "binary", True, False, db_keys))
498 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
499 db_keys = list(depgraph._frozen_config._trees_orig[myroot
500 ]["vartree"].dbapi._aux_cache_keys)
501 dbs.append((vardb, "installed", True, True, db_keys))
502 self._filtered_trees[myroot]["dbs"] = dbs
504 class depgraph(object):
506 pkg_tree_map = RootConfig.pkg_tree_map
508 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
510 def __init__(self, settings, trees, myopts, myparams, spinner,
511 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
512 if frozen_config is None:
513 frozen_config = _frozen_depgraph_config(settings, trees,
515 self._frozen_config = frozen_config
516 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
517 allow_backtracking, backtrack_parameters)
518 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
520 self._select_atoms = self._select_atoms_highest_available
521 self._select_package = self._select_pkg_highest_available
525 Load installed package metadata if appropriate. This used to be called
526 from the constructor, but that wasn't very nice since this procedure
527 is slow and it generates spinner output. So, now it's called on-demand
528 by various methods when necessary.
531 if self._dynamic_config._vdb_loaded:
534 for myroot in self._frozen_config.trees:
536 preload_installed_pkgs = \
537 "--nodeps" not in self._frozen_config.myopts
539 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
540 if not fake_vartree.dbapi:
541 # This needs to be called for the first depgraph, but not for
542 # backtracking depgraphs that share the same frozen_config.
545 # FakeVartree.sync() populates virtuals, and we want
546 # self.pkgsettings to have them populated too.
547 self._frozen_config.pkgsettings[myroot] = \
548 portage.config(clone=fake_vartree.settings)
550 if preload_installed_pkgs:
551 vardb = fake_vartree.dbapi
552 fakedb = self._dynamic_config._graph_trees[
553 myroot]["vartree"].dbapi
556 self._spinner_update()
557 # This triggers metadata updates via FakeVartree.
558 vardb.aux_get(pkg.cpv, [])
559 fakedb.cpv_inject(pkg)
561 self._dynamic_config._vdb_loaded = True
	def _spinner_update(self):
		# Advance the progress spinner, if one was configured.
		if self._frozen_config.spinner:
			self._frozen_config.spinner.update()
567 def _show_missed_update(self):
569 # In order to minimize noise, show only the highest
570 # missed update from each SLOT.
572 for pkg, mask_reasons in \
573 self._dynamic_config._runtime_pkg_mask.items():
575 # Exclude installed here since we only
576 # want to show available updates.
578 k = (pkg.root, pkg.slot_atom)
579 if k in missed_updates:
580 other_pkg, mask_type, parent_atoms = missed_updates[k]
583 for mask_type, parent_atoms in mask_reasons.items():
586 missed_updates[k] = (pkg, mask_type, parent_atoms)
589 if not missed_updates:
592 missed_update_types = {}
593 for pkg, mask_type, parent_atoms in missed_updates.values():
594 missed_update_types.setdefault(mask_type,
595 []).append((pkg, parent_atoms))
597 if '--quiet' in self._frozen_config.myopts and \
598 '--debug' not in self._frozen_config.myopts:
599 missed_update_types.pop("slot conflict", None)
600 missed_update_types.pop("missing dependency", None)
602 self._show_missed_update_slot_conflicts(
603 missed_update_types.get("slot conflict"))
605 self._show_missed_update_unsatisfied_dep(
606 missed_update_types.get("missing dependency"))
608 def _show_missed_update_unsatisfied_dep(self, missed_updates):
610 if not missed_updates:
613 backtrack_masked = []
615 for pkg, parent_atoms in missed_updates:
618 for parent, root, atom in parent_atoms:
619 self._show_unsatisfied_dep(root, atom, myparent=parent,
620 check_backtrack=True)
621 except self._backtrack_mask:
622 # This is displayed below in abbreviated form.
623 backtrack_masked.append((pkg, parent_atoms))
626 writemsg("\n!!! The following update has been skipped " + \
627 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
629 writemsg(str(pkg.slot_atom), noiselevel=-1)
631 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
632 writemsg("\n", noiselevel=-1)
634 for parent, root, atom in parent_atoms:
635 self._show_unsatisfied_dep(root, atom, myparent=parent)
636 writemsg("\n", noiselevel=-1)
639 # These are shown in abbreviated form, in order to avoid terminal
640 # flooding from mask messages as reported in bug #285832.
641 writemsg("\n!!! The following update(s) have been skipped " + \
642 "due to unsatisfied dependencies\n" + \
643 "!!! triggered by backtracking:\n\n", noiselevel=-1)
644 for pkg, parent_atoms in backtrack_masked:
645 writemsg(str(pkg.slot_atom), noiselevel=-1)
647 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
648 writemsg("\n", noiselevel=-1)
650 def _show_missed_update_slot_conflicts(self, missed_updates):
652 if not missed_updates:
656 msg.append("\n!!! One or more updates have been skipped due to " + \
657 "a dependency conflict:\n\n")
660 for pkg, parent_atoms in missed_updates:
661 msg.append(str(pkg.slot_atom))
663 msg.append(" for %s" % (pkg.root,))
666 for parent, atom in parent_atoms:
670 msg.append(" conflicts with\n")
672 if isinstance(parent,
673 (PackageArg, AtomArg)):
674 # For PackageArg and AtomArg types, it's
675 # redundant to display the atom attribute.
676 msg.append(str(parent))
678 # Display the specific atom from SetArg or
680 msg.append("%s required by %s" % (atom, parent))
684 writemsg("".join(msg), noiselevel=-1)
686 def _show_slot_collision_notice(self):
687 """Show an informational message advising the user to mask one of the
688 the packages. In some cases it may be possible to resolve this
689 automatically, but support for backtracking (removal nodes that have
690 already been selected) will be required in order to handle all possible
694 if not self._dynamic_config._slot_collision_info:
697 self._show_merge_list()
699 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
700 handler = self._dynamic_config._slot_conflict_handler
702 conflict = handler.get_conflict()
703 writemsg(conflict, noiselevel=-1)
705 explanation = handler.get_explanation()
707 writemsg(explanation, noiselevel=-1)
710 if "--quiet" in self._frozen_config.myopts:
714 msg.append("It may be possible to solve this problem ")
715 msg.append("by using package.mask to prevent one of ")
716 msg.append("those packages from being selected. ")
717 msg.append("However, it is also possible that conflicting ")
718 msg.append("dependencies exist such that they are impossible to ")
719 msg.append("satisfy simultaneously. If such a conflict exists in ")
720 msg.append("the dependencies of two different packages, then those ")
721 msg.append("packages can not be installed simultaneously.")
722 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
723 if not self._dynamic_config._allow_backtracking and \
724 (backtrack_opt is None or \
725 (backtrack_opt > 0 and backtrack_opt < 30)):
726 msg.append(" You may want to try a larger value of the ")
727 msg.append("--backtrack option, such as --backtrack=30, ")
728 msg.append("in order to see if that will solve this conflict ")
729 msg.append("automatically.")
731 for line in textwrap.wrap(''.join(msg), 70):
732 writemsg(line + '\n', noiselevel=-1)
733 writemsg('\n', noiselevel=-1)
736 msg.append("For more information, see MASKED PACKAGES ")
737 msg.append("section in the emerge man page or refer ")
738 msg.append("to the Gentoo Handbook.")
739 for line in textwrap.wrap(''.join(msg), 70):
740 writemsg(line + '\n', noiselevel=-1)
741 writemsg('\n', noiselevel=-1)
743 def _process_slot_conflicts(self):
745 Process slot conflict data to identify specific atoms which
746 lead to conflict. These atoms only match a subset of the
747 packages that have been pulled into a given slot.
749 for (slot_atom, root), slot_nodes \
750 in self._dynamic_config._slot_collision_info.items():
752 all_parent_atoms = set()
753 for pkg in slot_nodes:
754 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
757 all_parent_atoms.update(parent_atoms)
759 for pkg in slot_nodes:
760 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
761 if parent_atoms is None:
763 self._dynamic_config._parent_atoms[pkg] = parent_atoms
764 for parent_atom in all_parent_atoms:
765 if parent_atom in parent_atoms:
767 # Use package set for matching since it will match via
768 # PROVIDE when necessary, while match_from_list does not.
769 parent, atom = parent_atom
770 atom_set = InternalPackageSet(
771 initial_atoms=(atom,), allow_repo=True)
772 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
773 parent_atoms.add(parent_atom)
775 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
777 def _reinstall_for_flags(self, forced_flags,
778 orig_use, orig_iuse, cur_use, cur_iuse):
779 """Return a set of flags that trigger reinstallation, or None if there
780 are no such flags."""
781 if "--newuse" in self._frozen_config.myopts or \
782 "--binpkg-respect-use" in self._frozen_config.myopts:
783 flags = set(orig_iuse.symmetric_difference(
784 cur_iuse).difference(forced_flags))
785 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
786 cur_iuse.intersection(cur_use)))
789 elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
790 flags = orig_iuse.intersection(orig_use).symmetric_difference(
791 cur_iuse.intersection(cur_use))
796 def _create_graph(self, allow_unsatisfied=False):
797 dep_stack = self._dynamic_config._dep_stack
798 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
799 while dep_stack or dep_disjunctive_stack:
800 self._spinner_update()
802 dep = dep_stack.pop()
803 if isinstance(dep, Package):
804 if not self._add_pkg_deps(dep,
805 allow_unsatisfied=allow_unsatisfied):
808 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
810 if dep_disjunctive_stack:
811 if not self._pop_disjunction(allow_unsatisfied):
815 def _expand_set_args(self, input_args, add_to_digraph=False):
817 Iterate over a list of DependencyArg instances and yield all
818 instances given in the input together with additional SetArg
819 instances that are generated from nested sets.
820 @param input_args: An iterable of DependencyArg instances
821 @type input_args: Iterable
822 @param add_to_digraph: If True then add SetArg instances
823 to the digraph, in order to record parent -> child
824 relationships from nested sets
825 @type add_to_digraph: Boolean
827 @returns: All args given in the input together with additional
828 SetArg instances that are generated from nested sets
831 traversed_set_args = set()
833 for arg in input_args:
834 if not isinstance(arg, SetArg):
838 root_config = arg.root_config
839 depgraph_sets = self._dynamic_config.sets[root_config.root]
842 arg = arg_stack.pop()
843 if arg in traversed_set_args:
845 traversed_set_args.add(arg)
848 # Traverse nested sets and add them to the stack
849 # if they're not already in the graph. Also, graph
850 # edges between parent and nested sets.
851 for token in arg.pset.getNonAtoms():
852 if not token.startswith(SETPREFIX):
854 s = token[len(SETPREFIX):]
855 nested_set = depgraph_sets.sets.get(s)
856 if nested_set is None:
857 nested_set = root_config.sets.get(s)
858 if nested_set is not None:
859 nested_arg = SetArg(arg=token, pset=nested_set,
860 root_config=root_config)
861 arg_stack.append(nested_arg)
863 self._dynamic_config.digraph.add(nested_arg, arg,
864 priority=BlockerDepPriority.instance)
865 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
# Process a single Dependency: blocker atoms become Blocker nodes; normal
# atoms get a package selected via self._select_package() which is then fed
# into the graph through self._add_pkg(). Unsatisfiable deps are either
# queued for display or trigger a backtracking restart.
# NOTE(review): this listing elides several original source lines (gaps in
# the embedded numbering, e.g. 866, 873, 875, 882, 889-890, 894, 897,
# 900-901, 905, 908, 911, 918, 920, 929, 933-935, 941, 944-946, 948,
# 952-954), so the exact control flow between the fragments below cannot
# be confirmed from this view — verify against upstream before editing.
867 def _add_dep(self, dep, allow_unsatisfied=False):
868 debug = "--debug" in self._frozen_config.myopts
869 buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
870 nodeps = "--nodeps" in self._frozen_config.myopts
# "deep" is True for unbounded recursion, otherwise an integer depth limit.
871 deep = self._dynamic_config.myparams.get("deep", 0)
872 recurse = deep is True or dep.depth <= deep
# Blocker path: only record a Blocker when the dep actually matters
# (not ignored/optional) and the parent is not in slot collision.
874 if not buildpkgonly and \
876 not dep.collapsed_priority.ignored and \
877 not dep.collapsed_priority.optional and \
878 dep.parent not in self._dynamic_config._slot_collision_nodes:
879 if dep.parent.onlydeps:
880 # It's safe to ignore blockers if the
881 # parent is an --onlydeps node.
883 # The blocker applies to the root where
884 # the parent is or will be installed.
885 blocker = Blocker(atom=dep.atom,
886 eapi=dep.parent.metadata["EAPI"],
887 priority=dep.priority, root=dep.parent.root)
888 self._dynamic_config._blocker_parents.add(blocker, dep.parent)
# Non-blocker path: select a package unless the caller pre-chose dep.child.
891 if dep.child is None:
892 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
893 onlydeps=dep.onlydeps)
895 # The caller has selected a specific package
896 # via self._minimize_packages().
898 existing_node = self._dynamic_config._slot_pkg_map[
899 dep.root].get(dep_pkg.slot_atom)
# Unsatisfied-dep handling (fragmentary in this listing).
902 if (dep.collapsed_priority.optional or
903 dep.collapsed_priority.ignored):
904 # This is an unnecessary build-time dep.
906 if allow_unsatisfied:
907 self._dynamic_config._unsatisfied_deps.append(dep)
909 self._dynamic_config._unsatisfied_deps_for_display.append(
910 ((dep.root, dep.atom), {"myparent":dep.parent}))
912 # The parent node should not already be in
913 # runtime_pkg_mask, since that would trigger an
914 # infinite backtracking loop.
915 if self._dynamic_config._allow_backtracking:
916 if dep.parent in self._dynamic_config._runtime_pkg_mask:
917 if "--debug" in self._frozen_config.myopts:
919 "!!! backtracking loop detected: %s %s\n" % \
921 self._dynamic_config._runtime_pkg_mask[
922 dep.parent]), noiselevel=-1)
923 elif not self.need_restart():
924 # Do not backtrack if only USE have to be changed in
925 # order to satisfy the dependency.
926 dep_pkg, existing_node = \
927 self._select_package(dep.root, dep.atom.without_use,
928 onlydeps=dep.onlydeps)
# Record the missing dependency so the backtracker can mask a parent
# and restart resolution.
930 self._dynamic_config._backtrack_infos["missing dependency"] = dep
931 self._dynamic_config._need_restart = True
932 if "--debug" in self._frozen_config.myopts:
936 msg.append("backtracking due to unsatisfied dep:")
937 msg.append("  parent: %s" % dep.parent)
938 msg.append("  priority: %s" % dep.priority)
939 msg.append("  root: %s" % dep.root)
940 msg.append("  atom: %s" % dep.atom)
942 writemsg_level("".join("%s\n" % l for l in msg),
943 noiselevel=-1, level=logging.DEBUG)
947 self._rebuild.add(dep_pkg, dep)
# Ignored deps are only traversed when explicitly requested
# (_traverse_ignored_deps, used by "complete" mode).
949 ignore = dep.collapsed_priority.ignored and \
950 not self._dynamic_config._traverse_ignored_deps
951 if not ignore and not self._add_pkg(dep_pkg, dep):
# Return (existing_node, matches): the package already occupying pkg's
# slot in the graph (or None) and whether `atom` also matches that
# existing node (i.e. it can be reused instead of conflicting).
955 def _check_slot_conflict(self, pkg, atom):
956 existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
# NOTE(review): original lines 957-958 and 961 are elided from this
# listing, so the guard around the PROVIDE-aware match below is
# incomplete here — presumably it tests existing_node and atom for
# None; confirm against upstream.
959 matches = pkg.cpv == existing_node.cpv
960 if pkg != existing_node and \
962 # Use package set for matching since it will match via
963 # PROVIDE when necessary, while match_from_list does not.
964 matches = bool(InternalPackageSet(initial_atoms=(atom,),
965 allow_repo=True).findAtomForPackage(existing_node,
966 modified_use=self._pkg_use_enabled(existing_node)))
968 return (existing_node, matches)
# Add `pkg` (selected for Dependency `dep`) to the dependency digraph,
# handling REQUIRED_USE validation, slot-conflict detection/backtracking,
# virtual/provider bookkeeping, and queuing of the package for recursive
# dependency traversal.
# NOTE(review): many original lines are elided from this listing (gaps in
# the embedded numbering throughout, e.g. 971-976, 979-980, 982, 988-989,
# 992, 996, 1000-1003, 1008-1010, 1024-1025, 1029-1031, 1036-1037, 1039,
# 1043, 1046, 1050, 1055, 1059, 1063-1064, 1070, 1073, 1075, 1081,
# 1085-1086, 1091-1094, 1108, 1110, 1112, 1114, 1116-1117, 1120,
# 1122-1124, 1129, 1131-1132, 1135-1136, 1138, 1143, 1153, 1156, 1158,
# 1162-1164, 1174, 1177-1178, 1184-1185, 1192, 1196, 1203, 1207-1208,
# 1212, 1214, 1218-1220, 1222, 1229-1230, 1235, 1241-1244, 1249, 1252,
# 1254), so try/except scaffolding, several returns and loop headers are
# missing from view — verify against upstream before editing logic.
970 def _add_pkg(self, pkg, dep):
977 myparent = dep.parent
978 priority = dep.priority
981 priority = DepPriority()
# Remnants of the original docstring follow (its quotes are elided).
983 Fills the digraph with nodes comprised of packages to merge.
984 mybigkey is the package spec of the package to merge.
985 myparent is the package depending on mybigkey ( or None )
986 addme = Should we add this package to the digraph or are we just looking at it's deps?
987 Think --onlydeps, we need to ignore packages in that case.
990 #IUSE-aware emerge -> USE DEP aware depgraph
991 #"no downgrade" emerge
993 # Ensure that the dependencies of the same package
994 # are never processed more than once.
995 previously_added = pkg in self._dynamic_config.digraph
997 # select the correct /var database that we'll be checking against
998 vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
999 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
1004 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1005 except portage.exception.InvalidDependString as e:
1006 if not pkg.installed:
1007 # should have been masked before it was selected
1011 # NOTE: REQUIRED_USE checks are delayed until after
1012 # package selection, since we want to prompt the user
1013 # for USE adjustment rather than have REQUIRED_USE
1014 # affect package selection and || dep choices.
1015 if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
1016 eapi_has_required_use(pkg.metadata["EAPI"]):
1017 required_use_is_sat = check_required_use(
1018 pkg.metadata["REQUIRED_USE"],
1019 self._pkg_use_enabled(pkg),
1020 pkg.iuse.is_valid_flag)
1021 if not required_use_is_sat:
1022 if dep.atom is not None and dep.parent is not None:
1023 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1026 for parent_atom in arg_atoms:
1027 parent, atom = parent_atom
1028 self._add_parent_atom(pkg, parent_atom)
# Unsatisfied REQUIRED_USE: queue for display and avoid a restart loop.
1032 atom = Atom("=" + pkg.cpv)
1033 self._dynamic_config._unsatisfied_deps_for_display.append(
1034 ((pkg.root, atom), {"myparent":dep.parent}))
1035 self._dynamic_config._skip_restart = True
1038 if not pkg.onlydeps:
1040 existing_node, existing_node_matches = \
1041 self._check_slot_conflict(pkg, dep.atom)
1042 slot_collision = False
1044 if existing_node_matches:
1045 # The existing node can be reused.
1047 for parent_atom in arg_atoms:
1048 parent, atom = parent_atom
1049 self._dynamic_config.digraph.add(existing_node, parent,
1051 self._add_parent_atom(existing_node, parent_atom)
1052 # If a direct circular dependency is not an unsatisfied
1053 # buildtime dependency then drop it here since otherwise
1054 # it can skew the merge order calculation in an unwanted
1056 if existing_node != myparent or \
1057 (priority.buildtime and not priority.satisfied):
1058 self._dynamic_config.digraph.addnode(existing_node, myparent,
1060 if dep.atom is not None and dep.parent is not None:
1061 self._add_parent_atom(existing_node,
1062 (dep.parent, dep.atom))
1065 # A slot conflict has occurred. 
1066 # The existing node should not already be in
1067 # runtime_pkg_mask, since that would trigger an
1068 # infinite backtracking loop.
1069 if self._dynamic_config._allow_backtracking and \
1071 self._dynamic_config._runtime_pkg_mask:
1072 if "--debug" in self._frozen_config.myopts:
1074 "!!! backtracking loop detected: %s %s\n" % \
1076 self._dynamic_config._runtime_pkg_mask[
1077 existing_node]), noiselevel=-1)
1078 elif self._dynamic_config._allow_backtracking and \
1079 not self._accept_blocker_conflicts() and \
1080 not self.need_restart():
1082 self._add_slot_conflict(pkg)
1083 if dep.atom is not None and dep.parent is not None:
1084 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1087 for parent_atom in arg_atoms:
1088 parent, atom = parent_atom
1089 self._add_parent_atom(pkg, parent_atom)
1090 self._process_slot_conflicts()
1095 # The ordering of backtrack_data can make
1096 # a difference here, because both mask actions may lead
1097 # to valid, but different, solutions and the one with
1098 # 'existing_node' masked is usually the better one. Because
1099 # of that, we choose an order such that
1100 # the backtracker will first explore the choice with
1101 # existing_node masked. The backtracker reverses the
1102 # order, so the order it uses is the reverse of the
1103 # order shown here. See bug #339606.
1104 for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
1105 # For missed update messages, find out which
1106 # atoms matched to_be_selected that did not
1107 # match to_be_masked.
1109 self._dynamic_config._parent_atoms.get(to_be_selected, set())
1111 conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
1113 parent_atoms = conflict_atoms
1115 all_parents.update(parent_atoms)
1118 for parent, atom in parent_atoms:
1119 i = InternalPackageSet(initial_atoms=(atom,),
1121 if not i.findAtomForPackage(to_be_masked):
1125 if to_be_selected >= to_be_masked:
1126 # We only care about the parent atoms
1127 # when they trigger a downgrade.
1128 parent_atoms = set()
1130 fallback_data.append((to_be_masked, parent_atoms))
1133 # 'to_be_masked' does not violate any parent atom, which means
1134 # there is no point in masking it.
1137 backtrack_data.append((to_be_masked, parent_atoms))
1139 if not backtrack_data:
1140 # This shouldn't happen, but fall back to the old
1141 # behavior if this gets triggered somehow.
1142 backtrack_data = fallback_data
1144 if len(backtrack_data) > 1:
1145 # NOTE: Generally, we prefer to mask the higher
1146 # version since this solves common cases in which a
1147 # lower version is needed so that all dependencies
1148 # will be satisfied (bug #337178). However, if
1149 # existing_node happens to be installed then we
1150 # mask that since this is a common case that is
1151 # triggered when --update is not enabled.
1152 if existing_node.installed:
1154 elif pkg > existing_node:
1155 backtrack_data.reverse()
1157 to_be_masked = backtrack_data[-1][0]
# Hand the conflict to the backtracker and request a restart.
1159 self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
1160 self._dynamic_config._need_restart = True
1161 if "--debug" in self._frozen_config.myopts:
1165 msg.append("backtracking due to slot conflict:")
1166 if backtrack_data is fallback_data:
1167 msg.append("!!! backtrack_data fallback")
1168 msg.append("   first package:  %s" % existing_node)
1169 msg.append("   second package: %s" % pkg)
1170 msg.append("  package to mask: %s" % to_be_masked)
1171 msg.append("      slot: %s" % pkg.slot_atom)
1172 msg.append("   parents: %s" % ", ".join( \
1173 "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
1175 writemsg_level("".join("%s\n" % l for l in msg),
1176 noiselevel=-1, level=logging.DEBUG)
1179 # A slot collision has occurred. Sometimes this coincides
1180 # with unresolvable blockers, so the slot collision will be
1181 # shown later if there are no unresolvable blockers.
1182 self._add_slot_conflict(pkg)
1183 slot_collision = True
1186 # Now add this node to the graph so that self.display()
1187 # can show use flags and --tree portage.output. This node is
1188 # only being partially added to the graph. It must not be
1189 # allowed to interfere with the other nodes that have been
1190 # added. Do not overwrite data for existing nodes in
1191 # self._dynamic_config.mydbapi since that data will be used for blocker
1193 # Even though the graph is now invalid, continue to process
1194 # dependencies so that things like --fetchonly can still
1195 # function despite collisions.
1197 elif not previously_added:
# First time this slot is filled: register pkg in the fake dbapi and
# invalidate caches that depend on the visible package set.
1198 self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1199 self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
1200 self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1201 self._dynamic_config._highest_pkg_cache.clear()
1202 self._check_masks(pkg)
1204 if not pkg.installed:
1205 # Allow this package to satisfy old-style virtuals in case it
1206 # doesn't already. Any pre-existing providers will be preferred
1209 pkgsettings.setinst(pkg.cpv, pkg.metadata)
1210 # For consistency, also update the global virtuals.
1211 settings = self._frozen_config.roots[pkg.root].settings
1213 settings.setinst(pkg.cpv, pkg.metadata)
1215 except portage.exception.InvalidDependString as e:
1216 if not pkg.installed:
1217 # should have been masked before it was selected
1221 self._dynamic_config._set_nodes.add(pkg)
1223 # Do this even when addme is False (--onlydeps) so that the
1224 # parent/child relationship is always known in case
1225 # self._show_slot_collision_notice() needs to be called later.
1226 self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
1227 if dep.atom is not None and dep.parent is not None:
1228 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1231 for parent_atom in arg_atoms:
1232 parent, atom = parent_atom
1233 self._dynamic_config.digraph.add(pkg, parent, priority=priority)
1234 self._add_parent_atom(pkg, parent_atom)
1236 """ This section determines whether we go deeper into dependencies or not.
1237 We want to go deeper on a few occasions:
1238 Installing package A, we need to make sure package A's deps are met.
1239 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1240 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1245 deep = self._dynamic_config.myparams.get("deep", 0)
1246 recurse = deep is True or depth + 1 <= deep
1247 dep_stack = self._dynamic_config._dep_stack
1248 if "recurse" not in self._dynamic_config.myparams:
# Installed packages beyond the depth limit go to _ignored_deps
# instead of the active traversal stack.
1250 elif pkg.installed and not recurse:
1251 dep_stack = self._dynamic_config._ignored_deps
1253 self._spinner_update()
1255 if not previously_added:
1256 dep_stack.append(pkg)
def _check_masks(self, pkg):
	"""Record a missed upgrade that is masked only by a LICENSE change.

	If a newer package sharing pkg's slot was masked solely because of
	its LICENSE (and for no other reason), remember it in
	_masked_license_updates so the user can be informed later.
	"""
	key = (pkg.root, pkg.slot_atom)
	masked_upgrade = self._frozen_config._highest_license_masked.get(key)
	if masked_upgrade is None:
		return
	if pkg < masked_upgrade:
		self._dynamic_config._masked_license_updates.add(masked_upgrade)
def _add_parent_atom(self, pkg, parent_atom):
	"""Register parent_atom (a (parent_pkg, atom) pair) as a parent of pkg.

	Creates the per-package set on first use; subsequent calls just add
	to the existing set.
	"""
	registry = self._dynamic_config._parent_atoms
	registry.setdefault(pkg, set()).add(parent_atom)
# Record pkg as a slot-conflict node and group every package occupying
# the same (slot_atom, root) under _slot_collision_info for later display.
1277 def _add_slot_conflict(self, pkg):
1278 self._dynamic_config._slot_collision_nodes.add(pkg)
1279 slot_key = (pkg.slot_atom, pkg.root)
1280 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1281 if slot_nodes is None:
# NOTE(review): original line 1282 is elided from this listing —
# presumably it initializes slot_nodes to a new set; confirm upstream.
1283 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1284 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
# Extract pkg's DEPEND/RDEPEND/PDEPEND strings, apply --buildpkgonly /
# --with-bdeps / --root-deps / removal-action policy, then feed each
# dependency class through _queue_disjunctive_deps and
# _add_pkg_dep_string. Finally marks pkg as traversed.
# NOTE(review): numerous original lines are elided from this listing
# (e.g. 1288, 1290-1291, 1294, 1297-1298, 1300, 1302, 1308, 1318-1320,
# 1322, 1328-1329, 1331-1332, 1339, 1346-1347, 1356-1357, 1360,
# 1362-1364, 1371-1372, 1378-1380, 1384, 1389-1392, 1396, 1398-1400,
# 1402-1406, 1409, 1412-1414, 1416), including try:/return scaffolding
# and the assignments of myroot/edepend/deps — verify upstream before
# changing logic.
1287 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1289 mytype = pkg.type_name
1292 metadata = pkg.metadata
1293 myuse = self._pkg_use_enabled(pkg)
1295 depth = pkg.depth + 1
1296 removal_action = "remove" in self._dynamic_config.myparams
1299 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1301 edepend[k] = metadata[k]
# --buildpkgonly without "deep": runtime deps are irrelevant.
1303 if not pkg.built and \
1304 "--buildpkgonly" in self._frozen_config.myopts and \
1305 "deep" not in self._dynamic_config.myparams:
1306 edepend["RDEPEND"] = ""
1307 edepend["PDEPEND"] = ""
1309 ignore_build_time_deps = False
1310 if pkg.built and not removal_action:
1311 if self._dynamic_config.myparams.get("bdeps", "n") == "y":
1312 # Pull in build time deps as requested, but marked them as
1313 # "optional" since they are not strictly required. This allows
1314 # more freedom in the merge order calculation for solving
1315 # circular dependencies. Don't convert to PDEPEND since that
1316 # could make --with-bdeps=y less effective if it is used to
1317 # adjust merge order to prevent built_with_use() calls from
1321 ignore_build_time_deps = True
1323 if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
1324 # Removal actions never traverse ignored buildtime
1325 # dependencies, so it's safe to discard them early.
1326 edepend["DEPEND"] = ""
1327 ignore_build_time_deps = True
1330 depend_root = myroot
1333 root_deps = self._frozen_config.myopts.get("--root-deps")
1334 if root_deps is not None:
1335 if root_deps is True:
1336 depend_root = myroot
1337 elif root_deps == "rdeps":
1338 ignore_build_time_deps = True
1340 # If rebuild mode is not enabled, it's safe to discard ignored
1341 # build-time dependencies. If you want these deps to be traversed
1342 # in "complete" mode then you need to specify --with-bdeps=y.
1343 if ignore_build_time_deps and \
1344 not self._rebuild.rebuild:
1345 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples for each dependency class.
1348 (depend_root, edepend["DEPEND"],
1349 self._priority(buildtime=True,
1350 optional=(pkg.built or ignore_build_time_deps),
1351 ignored=ignore_build_time_deps)),
1352 (myroot, edepend["RDEPEND"],
1353 self._priority(runtime=True)),
1354 (myroot, edepend["PDEPEND"],
1355 self._priority(runtime_post=True))
1358 debug = "--debug" in self._frozen_config.myopts
1359 strict = mytype != "installed"
1361 for dep_root, dep_string, dep_priority in deps:
1365 writemsg_level("\nParent:    %s\n" % (pkg,),
1366 noiselevel=-1, level=logging.DEBUG)
1367 writemsg_level("Depstring: %s\n" % (dep_string,),
1368 noiselevel=-1, level=logging.DEBUG)
1369 writemsg_level("Priority:  %s\n" % (dep_priority,),
1370 noiselevel=-1, level=logging.DEBUG)
1373 dep_string = portage.dep.use_reduce(dep_string,
1374 uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
1375 except portage.exception.InvalidDependString as e:
1376 if not pkg.installed:
1377 # should have been masked before it was selected
1381 # Try again, but omit the is_valid_flag argument, since
1382 # invalid USE conditionals are a common problem and it's
1383 # practical to ignore this issue for installed packages.
1385 dep_string = portage.dep.use_reduce(dep_string,
1386 uselist=self._pkg_use_enabled(pkg))
1387 except portage.exception.InvalidDependString as e:
1388 self._dynamic_config._masked_installed.add(pkg)
1393 dep_string = list(self._queue_disjunctive_deps(
1394 pkg, dep_root, dep_priority, dep_string))
1395 except portage.exception.InvalidDependString as e:
1397 self._dynamic_config._masked_installed.add(pkg)
1401 # should have been masked before it was selected
1407 dep_string = portage.dep.paren_enclose(dep_string,
1408 unevaluated_atom=True)
1410 if not self._add_pkg_dep_string(
1411 pkg, dep_root, dep_priority, dep_string,
1415 self._dynamic_config._traversed_pkg_deps.add(pkg)
# Thin wrapper around _wrapped_add_pkg_dep_string that temporarily
# disables autounmask for optional/ignored dep priorities and restores
# the previous setting afterwards.
# NOTE(review): original lines 1419 (the signature's continuation line),
# 1425 and 1428-1429 are elided from this listing — the elided lines
# presumably hold the try:/finally: scaffolding that guarantees the
# restore on line 1430; confirm upstream.
1418 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1420 _autounmask_backup = self._dynamic_config._autounmask
1421 if dep_priority.optional or dep_priority.ignored:
1422 # Temporarily disable autounmask for deps that
1423 # don't necessarily need to be satisfied.
1424 self._dynamic_config._autounmask = False
1426 return self._wrapped_add_pkg_dep_string(
1427 pkg, dep_root, dep_priority, dep_string,
1430 self._dynamic_config._autounmask = _autounmask_backup
# Resolve a single dependency string for pkg: run _select_atoms, build a
# Dependency for each chosen (atom, child) pair, decide whether each dep
# is traversed now, ignored, or dropped, and recurse into indirect
# virtual deps while preserving real parent/child relationships.
# NOTE(review): many original lines are elided from this listing (gaps
# throughout the embedded numbering, e.g. 1439-1440, 1447-1448, 1453,
# 1455-1460, 1464, 1468, 1472, 1479, 1484-1485, 1492, 1495, 1497, 1501,
# 1505, 1511, 1520, 1522, 1525, 1530-1532, 1536-1537, 1539-1540, 1545,
# 1547, 1550, 1553, 1555, 1562, 1565-1567, 1571, 1574, 1578, 1581, 1583,
# 1587, 1589-1590, 1593, 1600, 1607, 1610, 1612, 1616, 1623-1624, 1633,
# 1635, 1638, 1641-1645, 1647-1648, 1653, 1655, 1658, 1661-1662,
# 1665-1666), including try:/continue/return lines — verify upstream
# before changing logic.
1432 def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
1433 dep_string, allow_unsatisfied):
1434 depth = pkg.depth + 1
# "deep" is True for unbounded recursion, else an integer limit.
1435 deep = self._dynamic_config.myparams.get("deep", 0)
1436 recurse_satisfied = deep is True or depth <= deep
1437 debug = "--debug" in self._frozen_config.myopts
1438 strict = pkg.type_name != "installed"
1441 writemsg_level("\nParent:    %s\n" % (pkg,),
1442 noiselevel=-1, level=logging.DEBUG)
1443 writemsg_level("Depstring: %s\n" % (dep_string,),
1444 noiselevel=-1, level=logging.DEBUG)
1445 writemsg_level("Priority:  %s\n" % (dep_priority,),
1446 noiselevel=-1, level=logging.DEBUG)
1449 selected_atoms = self._select_atoms(dep_root,
1450 dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
1451 strict=strict, priority=dep_priority)
1452 except portage.exception.InvalidDependString as e:
1454 self._dynamic_config._masked_installed.add(pkg)
1457 # should have been masked before it was selected
1461 writemsg_level("Candidates: %s\n" % \
1462 ([str(x) for x in selected_atoms[pkg]],),
1463 noiselevel=-1, level=logging.DEBUG)
1465 root_config = self._frozen_config.roots[dep_root]
1466 vardb = root_config.trees["vartree"].dbapi
1467 traversed_virt_pkgs = set()
1469 reinstall_atoms = self._frozen_config.reinstall_atoms
# Direct deps of pkg.
1470 for atom, child in self._minimize_children(
1471 pkg, dep_priority, root_config, selected_atoms[pkg]):
1473 # If this was a specially generated virtual atom
1474 # from dep_check, map it back to the original, in
1475 # order to avoid distortion in places like display
1476 # or conflict resolution code.
1477 is_virt = hasattr(atom, '_orig_atom')
1478 atom = getattr(atom, '_orig_atom', atom)
1480 if atom.blocker and \
1481 (dep_priority.optional or dep_priority.ignored):
1482 # For --with-bdeps, ignore build-time only blockers
1483 # that originate from built packages.
1486 mypriority = dep_priority.copy()
1487 if not atom.blocker:
1488 root_slot = (pkg.root, pkg.slot_atom)
# Installed packages matching the atom, excluding those the user
# asked to reinstall.
1489 inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom)
1490 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1491 modified_use=self._pkg_use_enabled(inst_pkg))]
1493 for inst_pkg in inst_pkgs:
1494 if self._pkg_visibility_check(inst_pkg):
1496 mypriority.satisfied = inst_pkg
1498 if not mypriority.satisfied:
1499 # none visible, so use highest
1500 mypriority.satisfied = inst_pkgs[0]
1502 dep = Dependency(atom=atom,
1503 blocker=atom.blocker, child=child, depth=depth, parent=pkg,
1504 priority=mypriority, root=dep_root)
1506 # In some cases, dep_check will return deps that shouldn't
1507 # be proccessed any further, so they are identified and
1508 # discarded here. Try to discard as few as possible since
1509 # discarded dependencies reduce the amount of information
1510 # available for optimization of merge order.
1512 if not atom.blocker and \
1513 not recurse_satisfied and \
1514 mypriority.satisfied and \
1515 mypriority.satisfied.visible and \
1516 dep.child is not None and \
1517 not dep.child.installed and \
1518 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1519 dep.child.slot_atom) is None:
1521 if dep.root == self._frozen_config.target_root:
1523 myarg = next(self._iter_atoms_for_pkg(dep.child))
1524 except StopIteration:
1526 except InvalidDependString:
1527 if not dep.child.installed:
1528 # This shouldn't happen since the package
1529 # should have been masked.
1533 # Existing child selection may not be valid unless
1534 # it's added to the graph immediately, since "complete"
1535 # mode may select a different child later.
1538 self._dynamic_config._ignored_deps.append(dep)
1541 if dep_priority.ignored and \
1542 not self._dynamic_config._traverse_ignored_deps:
1543 if is_virt and dep.child is not None:
1544 traversed_virt_pkgs.add(dep.child)
1546 self._dynamic_config._ignored_deps.append(dep)
1548 if not self._add_dep(dep,
1549 allow_unsatisfied=allow_unsatisfied):
1551 if is_virt and dep.child is not None:
1552 traversed_virt_pkgs.add(dep.child)
# pkg's own entry has been processed; what remains are virtuals.
1554 selected_atoms.pop(pkg)
1556 # Add selected indirect virtual deps to the graph. This
1557 # takes advantage of circular dependency avoidance that's done
1558 # by dep_zapdeps. We preserve actual parent/child relationships
1559 # here in order to avoid distorting the dependency graph like
1560 # <=portage-2.1.6.x did.
1561 for virt_dep, atoms in selected_atoms.items():
1563 virt_pkg = virt_dep.child
1564 if virt_pkg not in traversed_virt_pkgs:
1568 writemsg_level("Candidates: %s: %s\n" % \
1569 (virt_pkg.cpv, [str(x) for x in atoms]),
1570 noiselevel=-1, level=logging.DEBUG)
1572 if not dep_priority.ignored or \
1573 self._dynamic_config._traverse_ignored_deps:
1575 inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(virt_dep.atom)
1576 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1577 modified_use=self._pkg_use_enabled(inst_pkg))]
1579 for inst_pkg in inst_pkgs:
1580 if self._pkg_visibility_check(inst_pkg):
1582 virt_dep.priority.satisfied = inst_pkg
1584 if not virt_dep.priority.satisfied:
1585 # none visible, so use highest
1586 virt_dep.priority.satisfied = inst_pkgs[0]
1588 if not self._add_pkg(virt_pkg, virt_dep):
# Deps of the virtual itself.
1591 for atom, child in self._minimize_children(
1592 pkg, self._priority(runtime=True), root_config, atoms):
1594 # If this was a specially generated virtual atom
1595 # from dep_check, map it back to the original, in
1596 # order to avoid distortion in places like display
1597 # or conflict resolution code.
1598 is_virt = hasattr(atom, '_orig_atom')
1599 atom = getattr(atom, '_orig_atom', atom)
1601 # This is a GLEP 37 virtual, so its deps are all runtime.
1602 mypriority = self._priority(runtime=True)
1603 if not atom.blocker:
1604 inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom)
1605 if not reinstall_atoms.findAtomForPackage(inst_pkg,
1606 modified_use=self._pkg_use_enabled(inst_pkg))]
1608 for inst_pkg in inst_pkgs:
1609 if self._pkg_visibility_check(inst_pkg):
1611 mypriority.satisfied = inst_pkg
1613 if not mypriority.satisfied:
1614 # none visible, so use highest
1615 mypriority.satisfied = inst_pkgs[0]
1617 # Dependencies of virtuals are considered to have the
1618 # same depth as the virtual itself.
1619 dep = Dependency(atom=atom,
1620 blocker=atom.blocker, child=child, depth=virt_dep.depth,
1621 parent=virt_pkg, priority=mypriority, root=dep_root,
1622 collapsed_parent=pkg, collapsed_priority=dep_priority)
1625 if not atom.blocker and \
1626 not recurse_satisfied and \
1627 mypriority.satisfied and \
1628 mypriority.satisfied.visible and \
1629 dep.child is not None and \
1630 not dep.child.installed and \
1631 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1632 dep.child.slot_atom) is None:
1634 if dep.root == self._frozen_config.target_root:
1636 myarg = next(self._iter_atoms_for_pkg(dep.child))
1637 except StopIteration:
1639 except InvalidDependString:
1640 if not dep.child.installed:
1646 self._dynamic_config._ignored_deps.append(dep)
1649 if dep_priority.ignored and \
1650 not self._dynamic_config._traverse_ignored_deps:
1651 if is_virt and dep.child is not None:
1652 traversed_virt_pkgs.add(dep.child)
1654 self._dynamic_config._ignored_deps.append(dep)
1656 if not self._add_dep(dep,
1657 allow_unsatisfied=allow_unsatisfied):
1659 if is_virt and dep.child is not None:
1660 traversed_virt_pkgs.add(dep.child)
1663 writemsg_level("Exiting... %s\n" % (pkg,),
1664 noiselevel=-1, level=logging.DEBUG)
# Generator: map each atom to a selected package, then use a bipartite
# atom<->package digraph to drop packages that are redundant (every atom
# they satisfy is also satisfied by another selection). Yields
# (atom, package) pairs, conflict-prone atoms first.
# NOTE(review): several original lines are elided from this listing
# (e.g. 1669, 1673-1681, 1684-1686, 1688, 1691-1695, 1699, 1701-1702,
# 1704-1706, 1710-1711, 1713, 1716-1719, 1722-1723, 1728-1729, 1731,
# 1735-1736, 1738, 1743-1745, 1747, 1749, 1754), including the
# initializations of atom_pkg_map/pkg_atom_map/cp_pkg_map and several
# loop headers — verify upstream before changing logic.
1668 def _minimize_children(self, parent, priority, root_config, atoms):
1670 Selects packages to satisfy the given atoms, and minimizes the
1671 number of selected packages. This serves to identify and eliminate
1672 redundant package selections when multiple atoms happen to specify
1682 dep_pkg, existing_node = self._select_package(
1683 root_config.root, atom)
1687 atom_pkg_map[atom] = dep_pkg
# With fewer than two selections there is nothing to minimize.
1689 if len(atom_pkg_map) < 2:
1690 for item in atom_pkg_map.items():
1696 for atom, pkg in atom_pkg_map.items():
1697 pkg_atom_map.setdefault(pkg, set()).add(atom)
1698 cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
1700 for cp, pkgs in cp_pkg_map.items():
1703 for atom in pkg_atom_map[pkg]:
1707 # Use a digraph to identify and eliminate any
1708 # redundant package selections.
1709 atom_pkg_graph = digraph()
1712 for atom in pkg_atom_map[pkg1]:
1714 atom_pkg_graph.add(pkg1, atom)
1715 atom_set = InternalPackageSet(initial_atoms=(atom,),
1720 if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
1721 atom_pkg_graph.add(pkg2, atom)
# A package can be removed only if every atom it satisfies has at
# least one other satisfying package.
1724 eliminate_pkg = True
1725 for atom in atom_pkg_graph.parent_nodes(pkg):
1726 if len(atom_pkg_graph.child_nodes(atom)) < 2:
1727 eliminate_pkg = False
1730 atom_pkg_graph.remove(pkg)
1732 # Yield ~, =*, < and <= atoms first, since those are more likely to
1733 # cause slot conflicts, and we want those atoms to be displayed
1734 # in the resulting slot conflict message (see bug #291142).
1737 for atom in cp_atoms:
1739 for child_pkg in atom_pkg_graph.child_nodes(atom):
1740 existing_node, matches = \
1741 self._check_slot_conflict(child_pkg, atom)
1742 if existing_node and not matches:
1746 conflict_atoms.append(atom)
1748 normal_atoms.append(atom)
1750 for atom in chain(conflict_atoms, normal_atoms):
1751 child_pkgs = atom_pkg_graph.child_nodes(atom)
1752 # if more than one child, yield highest version
1753 if len(child_pkgs) > 1:
1755 yield (atom, child_pkgs[-1])
# Walk a use_reduce()d dependency structure: defer disjunctive parts
# (|| groups and virtual/ atoms) onto _dep_disjunctive_stack via
# _queue_disjunction, and yield the remaining non-disjunctive elements.
# NOTE(review): original lines 1758, 1761-1763, 1765, 1769-1770,
# 1773-1775, 1781, 1784, 1788-1791 are elided from this listing
# (including the loop counter initialization/increments, the "||"
# test, the try:, and the yield of plain atoms) — verify upstream
# before changing logic.
1757 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
1759 Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
1760 Yields non-disjunctive deps. Raises InvalidDependString when
1764 while i < len(dep_struct):
1766 if isinstance(x, list):
# Recurse into nested groups.
1767 for y in self._queue_disjunctive_deps(
1768 pkg, dep_root, dep_priority, x):
# A "||" operator consumes the following element as its choices.
1771 self._queue_disjunction(pkg, dep_root, dep_priority,
1772 [ x, dep_struct[ i + 1 ] ] )
1776 x = portage.dep.Atom(x)
1777 except portage.exception.InvalidAtom:
1778 if not pkg.installed:
1779 raise portage.exception.InvalidDependString(
1780 "invalid atom: '%s'" % x)
1782 # Note: Eventually this will check for PROPERTIES=virtual
1783 # or whatever other metadata gets implemented for this
1785 if x.cp.startswith('virtual/'):
1786 self._queue_disjunction( pkg, dep_root,
1787 dep_priority, [ str(x) ] )
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
	"""Defer a disjunctive dep for later handling by _pop_disjunction."""
	entry = (pkg, dep_root, dep_priority, dep_struct)
	self._dynamic_config._dep_disjunctive_stack.append(entry)
# Pop one deferred disjunction and resolve it through
# _add_pkg_dep_string after re-serializing the structure.
# NOTE(review): original lines 1797, 1800 (docstring delimiters) and
# 1807-1809 are elided from this listing — the elided tail presumably
# holds the return values; confirm upstream.
1796 def _pop_disjunction(self, allow_unsatisfied):
1798 Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
1799 populate self._dynamic_config._dep_stack.
1801 pkg, dep_root, dep_priority, dep_struct = \
1802 self._dynamic_config._dep_disjunctive_stack.pop()
# Re-serialize the structure, keeping unevaluated (USE-conditional) atoms.
1803 dep_string = portage.dep.paren_enclose(dep_struct,
1804 unevaluated_atom=True)
1805 if not self._add_pkg_dep_string(
1806 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
	"""Construct a dependency priority of the appropriate class.

	Removal actions ("remove" in myparams) use UnmergeDepPriority;
	all other actions use DepPriority. Keyword arguments are passed
	through to the selected constructor.
	"""
	# Fix: the listing showed the second assignment dangling without an
	# "else:" (the original line between the two assignments was lost),
	# which is a syntax error as written — restore the conditional.
	if "remove" in self._dynamic_config.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
# Expand a category-less atom into a list of full atoms, one per
# category in which a matching package name exists in any configured db.
# NOTE(review): original lines 1818, 1823, 1825 (docstring delimiters
# and blank), 1829, 1831 and 1835-1837, 1841-1842 are elided from this
# listing — the elided lines presumably initialize the categories
# accumulator / deps list and return deps; confirm upstream.
1817 def _dep_expand(self, root_config, atom_without_category):
1819 @param root_config: a root config instance
1820 @type root_config: RootConfig
1821 @param atom_without_category: an atom without a category component
1822 @type atom_without_category: String
1824 @returns: a list of atoms containing categories (possibly empty)
# Splice a dummy "null" category in so the package-name part can be
# isolated with catsplit.
1826 null_cp = portage.dep_getkey(insert_category_into_atom(
1827 atom_without_category, "null"))
1828 cat, atom_pn = portage.catsplit(null_cp)
1830 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1832 for db, pkg_type, built, installed, db_keys in dbs:
1833 for cat in db.categories:
1834 if db.cp_list("%s/%s" % (cat, atom_pn)):
1838 for cat in categories:
1839 deps.append(Atom(insert_category_into_atom(
1840 atom_without_category, cat), allow_repo=True))
# Report whether any configured db for `root` carries a package under
# atom_cp — used to detect new-style virtuals shadowing an atom's cp.
# NOTE(review): original lines 1844 and 1848-1851 are elided from this
# listing — the elided lines presumably initialize and return the
# boolean result; confirm upstream.
1843 def _have_new_virt(self, root, atom_cp):
1845 for db, pkg_type, built, installed, db_keys in \
1846 self._dynamic_config._filtered_trees[root]["dbs"]:
1847 if db.cp_list(atom_cp):
# Generator: yield the (arg, atom) pairs from the current argument sets
# that match pkg, skipping atoms shadowed by new-style virtuals and
# atoms for which a strictly higher version exists in another slot
# (unless the argument pins the package directly).
# NOTE(review): original lines 1859-1860, 1863, 1866, 1870, 1873, 1875,
# 1878-1881 are elided from this listing — including the assignments of
# visible_pkgs/higher_slot, several continue/break statements and the
# yield itself; verify upstream before changing logic.
1852 def _iter_atoms_for_pkg(self, pkg):
1853 depgraph_sets = self._dynamic_config.sets[pkg.root]
1854 atom_arg_map = depgraph_sets.atom_arg_map
1855 root_config = self._frozen_config.roots[pkg.root]
1856 for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
# Skip atoms whose cp is shadowed by a new-style virtual.
1857 if atom.cp != pkg.cp and \
1858 self._have_new_virt(pkg.root, atom.cp):
1861 self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
1862 visible_pkgs.reverse() # descending order
1864 for visible_pkg in visible_pkgs:
1865 if visible_pkg.cp != atom.cp:
1867 if pkg >= visible_pkg:
1868 # This is descending order, and we're not
1869 # interested in any versions <= pkg given.
1871 if pkg.slot_atom != visible_pkg.slot_atom:
1872 higher_slot = visible_pkg
1874 if higher_slot is not None:
1876 for arg in atom_arg_map[(atom, pkg.root)]:
# PackageArg pins an exact package, overriding the higher-slot skip.
1877 if isinstance(arg, PackageArg) and \
1882 def select_files(self, myfiles):
1883 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1884 self._dynamic_config._initial_arg_list and call self._resolve to create the
1885 appropriate depgraph and return a favorite list."""
1887 debug = "--debug" in self._frozen_config.myopts
1888 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1889 sets = root_config.sets
1890 depgraph_sets = self._dynamic_config.sets[root_config.root]
1892 myroot = self._frozen_config.target_root
1893 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1894 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1895 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1896 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1897 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1898 pkgsettings = self._frozen_config.pkgsettings[myroot]
1900 onlydeps = "--onlydeps" in self._frozen_config.myopts
1903 ext = os.path.splitext(x)[1]
1905 if not os.path.exists(x):
1907 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1908 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1909 elif os.path.exists(
1910 os.path.join(pkgsettings["PKGDIR"], x)):
1911 x = os.path.join(pkgsettings["PKGDIR"], x)
1913 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
1914 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1915 return 0, myfavorites
1916 mytbz2=portage.xpak.tbz2(x)
1917 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1918 if os.path.realpath(x) != \
1919 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1920 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
1921 self._dynamic_config._skip_restart = True
1922 return 0, myfavorites
1924 pkg = self._pkg(mykey, "binary", root_config,
1926 args.append(PackageArg(arg=x, package=pkg,
1927 root_config=root_config))
1928 elif ext==".ebuild":
1929 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1930 pkgdir = os.path.dirname(ebuild_path)
1931 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1932 cp = pkgdir[len(tree_root)+1:]
1933 e = portage.exception.PackageNotFound(
1934 ("%s is not in a valid portage tree " + \
1935 "hierarchy or does not exist") % x)
1936 if not portage.isvalidatom(cp):
1938 cat = portage.catsplit(cp)[0]
1939 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1940 if not portage.isvalidatom("="+mykey):
1942 ebuild_path = portdb.findname(mykey)
1944 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1945 cp, os.path.basename(ebuild_path)):
1946 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
1947 self._dynamic_config._skip_restart = True
1948 return 0, myfavorites
1949 if mykey not in portdb.xmatch(
1950 "match-visible", portage.cpv_getkey(mykey)):
1951 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
1952 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
1953 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
1954 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1957 raise portage.exception.PackageNotFound(
1958 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1959 pkg = self._pkg(mykey, "ebuild", root_config,
1960 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
1961 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
1962 args.append(PackageArg(arg=x, package=pkg,
1963 root_config=root_config))
1964 elif x.startswith(os.path.sep):
1965 if not x.startswith(myroot):
1966 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1967 " $ROOT.\n") % x, noiselevel=-1)
1968 self._dynamic_config._skip_restart = True
1970 # Queue these up since it's most efficient to handle
1971 # multiple files in a single iter_owners() call.
1972 lookup_owners.append(x)
1973 elif x.startswith("." + os.sep) or \
1974 x.startswith(".." + os.sep):
1975 f = os.path.abspath(x)
1976 if not f.startswith(myroot):
1977 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
1978 " $ROOT.\n") % (f, x), noiselevel=-1)
1979 self._dynamic_config._skip_restart = True
1981 lookup_owners.append(f)
1983 if x in ("system", "world"):
1985 if x.startswith(SETPREFIX):
1986 s = x[len(SETPREFIX):]
1988 raise portage.exception.PackageSetNotFound(s)
1989 if s in depgraph_sets.sets:
1992 depgraph_sets.sets[s] = pset
1993 args.append(SetArg(arg=x, pset=pset,
1994 root_config=root_config))
1996 if not is_valid_package_atom(x, allow_repo=True):
1997 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1999 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2000 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2001 self._dynamic_config._skip_restart = True
2003 # Don't expand categories or old-style virtuals here unless
2004 # necessary. Expansion of old-style virtuals here causes at
2005 # least the following problems:
2006 # 1) It's more difficult to determine which set(s) an atom
2007 # came from, if any.
2008 # 2) It takes away freedom from the resolver to choose other
2009 # possible expansions when necessary.
2011 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2012 root_config=root_config))
2014 expanded_atoms = self._dep_expand(root_config, x)
2015 installed_cp_set = set()
2016 for atom in expanded_atoms:
2017 if vardb.cp_list(atom.cp):
2018 installed_cp_set.add(atom.cp)
2020 if len(installed_cp_set) > 1:
2021 non_virtual_cps = set()
2022 for atom_cp in installed_cp_set:
2023 if not atom_cp.startswith("virtual/"):
2024 non_virtual_cps.add(atom_cp)
2025 if len(non_virtual_cps) == 1:
2026 installed_cp_set = non_virtual_cps
2028 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2029 installed_cp = next(iter(installed_cp_set))
2030 for atom in expanded_atoms:
2031 if atom.cp == installed_cp:
2033 for pkg in self._iter_match_pkgs_any(
2034 root_config, atom.without_use,
2036 if not pkg.installed:
2040 expanded_atoms = [atom]
2043 # If a non-virtual package and one or more virtual packages
2044 # are in expanded_atoms, use the non-virtual package.
2045 if len(expanded_atoms) > 1:
2046 number_of_virtuals = 0
2047 for expanded_atom in expanded_atoms:
2048 if expanded_atom.cp.startswith("virtual/"):
2049 number_of_virtuals += 1
2051 candidate = expanded_atom
2052 if len(expanded_atoms) - number_of_virtuals == 1:
2053 expanded_atoms = [ candidate ]
2055 if len(expanded_atoms) > 1:
2056 writemsg("\n\n", noiselevel=-1)
2057 ambiguous_package_name(x, expanded_atoms, root_config,
2058 self._frozen_config.spinner, self._frozen_config.myopts)
2059 self._dynamic_config._skip_restart = True
2060 return False, myfavorites
2062 atom = expanded_atoms[0]
2064 null_atom = Atom(insert_category_into_atom(x, "null"),
2066 cat, atom_pn = portage.catsplit(null_atom.cp)
2067 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2069 # Allow the depgraph to choose which virtual.
2070 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2075 args.append(AtomArg(arg=x, atom=atom,
2076 root_config=root_config))
2080 search_for_multiple = False
2081 if len(lookup_owners) > 1:
2082 search_for_multiple = True
2084 for x in lookup_owners:
2085 if not search_for_multiple and os.path.isdir(x):
2086 search_for_multiple = True
2087 relative_paths.append(x[len(myroot)-1:])
2090 for pkg, relative_path in \
2091 real_vardb._owners.iter_owners(relative_paths):
2092 owners.add(pkg.mycpv)
2093 if not search_for_multiple:
2097 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2098 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2099 self._dynamic_config._skip_restart = True
2103 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2105 # portage now masks packages with missing slot, but it's
2106 # possible that one was installed by an older version
2107 atom = Atom(portage.cpv_getkey(cpv))
2109 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2110 args.append(AtomArg(arg=atom, atom=atom,
2111 root_config=root_config))
2113 if "--update" in self._frozen_config.myopts:
2114 # In some cases, the greedy slots behavior can pull in a slot that
2115 # the user would want to uninstall due to it being blocked by a
2116 # newer version in a different slot. Therefore, it's necessary to
2117 # detect and discard any that should be uninstalled. Each time
2118 # that arguments are updated, package selections are repeated in
2119 # order to ensure consistency with the current arguments:
2121 # 1) Initialize args
2122 # 2) Select packages and generate initial greedy atoms
2123 # 3) Update args with greedy atoms
2124 # 4) Select packages and generate greedy atoms again, while
2125 # accounting for any blockers between selected packages
2126 # 5) Update args with revised greedy atoms
2128 self._set_args(args)
2131 greedy_args.append(arg)
2132 if not isinstance(arg, AtomArg):
2134 for atom in self._greedy_slots(arg.root_config, arg.atom):
2136 AtomArg(arg=arg.arg, atom=atom,
2137 root_config=arg.root_config))
2139 self._set_args(greedy_args)
2142 # Revise greedy atoms, accounting for any blockers
2143 # between selected packages.
2144 revised_greedy_args = []
2146 revised_greedy_args.append(arg)
2147 if not isinstance(arg, AtomArg):
2149 for atom in self._greedy_slots(arg.root_config, arg.atom,
2150 blocker_lookahead=True):
2151 revised_greedy_args.append(
2152 AtomArg(arg=arg.arg, atom=atom,
2153 root_config=arg.root_config))
2154 args = revised_greedy_args
2155 del revised_greedy_args
2157 self._set_args(args)
2159 myfavorites = set(myfavorites)
2161 if isinstance(arg, (AtomArg, PackageArg)):
2162 myfavorites.add(arg.atom)
2163 elif isinstance(arg, SetArg):
2164 myfavorites.add(arg.arg)
2165 myfavorites = list(myfavorites)
2168 portage.writemsg("\n", noiselevel=-1)
2169 # Order needs to be preserved since a feature of --nodeps
2170 # is to allow the user to force a specific merge order.
2171 self._dynamic_config._initial_arg_list = args[:]
2173 return self._resolve(myfavorites)
2175 def _resolve(self, myfavorites):
2176 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2177 call self._creategraph to process theier deps and return
2179 debug = "--debug" in self._frozen_config.myopts
2180 onlydeps = "--onlydeps" in self._frozen_config.myopts
2181 myroot = self._frozen_config.target_root
2182 pkgsettings = self._frozen_config.pkgsettings[myroot]
2183 pprovideddict = pkgsettings.pprovideddict
2184 virtuals = pkgsettings.getvirtuals()
2185 args = self._dynamic_config._initial_arg_list[:]
2186 for root, atom in chain(self._rebuild.rebuild_list,
2187 self._rebuild.reinstall_list):
2188 args.append(AtomArg(arg=atom, atom=atom,
2189 root_config=self._frozen_config.roots[root]))
2190 for arg in self._expand_set_args(args, add_to_digraph=True):
2191 for atom in arg.pset.getAtoms():
2192 self._spinner_update()
2193 dep = Dependency(atom=atom, onlydeps=onlydeps,
2194 root=myroot, parent=arg)
2196 pprovided = pprovideddict.get(atom.cp)
2197 if pprovided and portage.match_from_list(atom, pprovided):
2198 # A provided package has been specified on the command line.
2199 self._dynamic_config._pprovided_args.append((arg, atom))
2201 if isinstance(arg, PackageArg):
2202 if not self._add_pkg(arg.package, dep) or \
2203 not self._create_graph():
2204 if not self.need_restart():
2205 sys.stderr.write(("\n\n!!! Problem " + \
2206 "resolving dependencies for %s\n") % \
2208 return 0, myfavorites
2211 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
2212 (arg, atom), noiselevel=-1)
2213 pkg, existing_node = self._select_package(
2214 myroot, atom, onlydeps=onlydeps)
2216 pprovided_match = False
2217 for virt_choice in virtuals.get(atom.cp, []):
2218 expanded_atom = portage.dep.Atom(
2219 atom.replace(atom.cp, virt_choice.cp, 1))
2220 pprovided = pprovideddict.get(expanded_atom.cp)
2222 portage.match_from_list(expanded_atom, pprovided):
2223 # A provided package has been
2224 # specified on the command line.
2225 self._dynamic_config._pprovided_args.append((arg, atom))
2226 pprovided_match = True
2231 if not (isinstance(arg, SetArg) and \
2232 arg.name in ("selected", "system", "world")):
2233 self._dynamic_config._unsatisfied_deps_for_display.append(
2234 ((myroot, atom), {"myparent" : arg}))
2235 return 0, myfavorites
2237 self._dynamic_config._missing_args.append((arg, atom))
2239 if atom.cp != pkg.cp:
2240 # For old-style virtuals, we need to repeat the
2241 # package.provided check against the selected package.
2242 expanded_atom = atom.replace(atom.cp, pkg.cp)
2243 pprovided = pprovideddict.get(pkg.cp)
2245 portage.match_from_list(expanded_atom, pprovided):
2246 # A provided package has been
2247 # specified on the command line.
2248 self._dynamic_config._pprovided_args.append((arg, atom))
2250 if pkg.installed and "selective" not in self._dynamic_config.myparams:
2251 self._dynamic_config._unsatisfied_deps_for_display.append(
2252 ((myroot, atom), {"myparent" : arg}))
2253 # Previous behavior was to bail out in this case, but
2254 # since the dep is satisfied by the installed package,
2255 # it's more friendly to continue building the graph
2256 # and just show a warning message. Therefore, only bail
2257 # out here if the atom is not from either the system or
2259 if not (isinstance(arg, SetArg) and \
2260 arg.name in ("selected", "system", "world")):
2261 return 0, myfavorites
2263 # Add the selected package to the graph as soon as possible
2264 # so that later dep_check() calls can use it as feedback
2265 # for making more consistent atom selections.
2266 if not self._add_pkg(pkg, dep):
2267 if self.need_restart():
2269 elif isinstance(arg, SetArg):
2270 writemsg(("\n\n!!! Problem resolving " + \
2271 "dependencies for %s from %s\n") % \
2272 (atom, arg.arg), noiselevel=-1)
2274 writemsg(("\n\n!!! Problem resolving " + \
2275 "dependencies for %s\n") % \
2276 (atom,), noiselevel=-1)
2277 return 0, myfavorites
2279 except SystemExit as e:
2280 raise # Needed else can't exit
2281 except Exception as e:
2282 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2283 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2286 # Now that the root packages have been added to the graph,
2287 # process the dependencies.
2288 if not self._create_graph():
2289 return 0, myfavorites
2293 except self._unknown_internal_error:
2294 return False, myfavorites
2296 digraph_set = frozenset(self._dynamic_config.digraph)
2298 if digraph_set.intersection(
2299 self._dynamic_config._needed_unstable_keywords) or \
2300 digraph_set.intersection(
2301 self._dynamic_config._needed_p_mask_changes) or \
2302 digraph_set.intersection(
2303 self._dynamic_config._needed_use_config_changes) or \
2304 digraph_set.intersection(
2305 self._dynamic_config._needed_license_changes) :
2306 #We failed if the user needs to change the configuration
2307 self._dynamic_config._success_without_autounmask = True
2308 return False, myfavorites
2312 if self._rebuild.trigger_rebuilds():
2313 backtrack_infos = self._dynamic_config._backtrack_infos
2314 config = backtrack_infos.setdefault("config", {})
2315 config["rebuild_list"] = self._rebuild.rebuild_list
2316 config["reinstall_list"] = self._rebuild.reinstall_list
2317 self._dynamic_config._need_restart = True
2318 return False, myfavorites
2320 # We're true here unless we are missing binaries.
2321 return (True, myfavorites)
2323 def _set_args(self, args):
2325 Create the "__non_set_args__" package set from atoms and packages given as
2326 arguments. This method can be called multiple times if necessary.
2327 The package selection cache is automatically invalidated, since
2328 arguments influence package selections.
2333 for root in self._dynamic_config.sets:
2334 depgraph_sets = self._dynamic_config.sets[root]
2335 depgraph_sets.sets.setdefault('__non_set_args__',
2336 InternalPackageSet(allow_repo=True)).clear()
2337 depgraph_sets.atoms.clear()
2338 depgraph_sets.atom_arg_map.clear()
2339 set_atoms[root] = []
2340 non_set_atoms[root] = []
2342 # We don't add set args to the digraph here since that
2343 # happens at a later stage and we don't want to make
2344 # any state changes here that aren't reversed by a
2345 # another call to this method.
2346 for arg in self._expand_set_args(args, add_to_digraph=False):
2347 atom_arg_map = self._dynamic_config.sets[
2348 arg.root_config.root].atom_arg_map
2349 if isinstance(arg, SetArg):
2350 atom_group = set_atoms[arg.root_config.root]
2352 atom_group = non_set_atoms[arg.root_config.root]
2354 for atom in arg.pset.getAtoms():
2355 atom_group.append(atom)
2356 atom_key = (atom, arg.root_config.root)
2357 refs = atom_arg_map.get(atom_key)
2360 atom_arg_map[atom_key] = refs
2364 for root in self._dynamic_config.sets:
2365 depgraph_sets = self._dynamic_config.sets[root]
2366 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2367 non_set_atoms.get(root, [])))
2368 depgraph_sets.sets['__non_set_args__'].update(
2369 non_set_atoms.get(root, []))
2371 # Invalidate the package selection cache, since
2372 # arguments influence package selections.
2373 self._dynamic_config._highest_pkg_cache.clear()
2374 for trees in self._dynamic_config._filtered_trees.values():
2375 trees["porttree"].dbapi._clear_cache()
2377 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2379 Return a list of slot atoms corresponding to installed slots that
2380 differ from the slot of the highest visible match. When
2381 blocker_lookahead is True, slot atoms that would trigger a blocker
2382 conflict are automatically discarded, potentially allowing automatic
2383 uninstallation of older slots when appropriate.
2385 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2386 if highest_pkg is None:
2388 vardb = root_config.trees["vartree"].dbapi
2390 for cpv in vardb.match(atom):
2391 # don't mix new virtuals with old virtuals
2392 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2393 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2395 slots.add(highest_pkg.metadata["SLOT"])
2399 slots.remove(highest_pkg.metadata["SLOT"])
2402 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2403 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2404 if pkg is not None and \
2405 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2406 greedy_pkgs.append(pkg)
2409 if not blocker_lookahead:
2410 return [pkg.slot_atom for pkg in greedy_pkgs]
2413 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2414 for pkg in greedy_pkgs + [highest_pkg]:
2415 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2417 selected_atoms = self._select_atoms(
2418 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2419 parent=pkg, strict=True)
2420 except portage.exception.InvalidDependString:
2423 for atoms in selected_atoms.values():
2424 blocker_atoms.extend(x for x in atoms if x.blocker)
2425 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2427 if highest_pkg not in blockers:
2430 # filter packages with invalid deps
2431 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2433 # filter packages that conflict with highest_pkg
2434 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2435 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2436 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2441 # If two packages conflict, discard the lower version.
2442 discard_pkgs = set()
2443 greedy_pkgs.sort(reverse=True)
2444 for i in range(len(greedy_pkgs) - 1):
2445 pkg1 = greedy_pkgs[i]
2446 if pkg1 in discard_pkgs:
2448 for j in range(i + 1, len(greedy_pkgs)):
2449 pkg2 = greedy_pkgs[j]
2450 if pkg2 in discard_pkgs:
2452 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2453 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2455 discard_pkgs.add(pkg2)
2457 return [pkg.slot_atom for pkg in greedy_pkgs \
2458 if pkg not in discard_pkgs]
2460 def _select_atoms_from_graph(self, *pargs, **kwargs):
2462 Prefer atoms matching packages that have already been
2463 added to the graph or those that are installed and have
2464 not been scheduled for replacement.
2466 kwargs["trees"] = self._dynamic_config._graph_trees
2467 return self._select_atoms_highest_available(*pargs, **kwargs)
2469 def _select_atoms_highest_available(self, root, depstring,
2470 myuse=None, parent=None, strict=True, trees=None, priority=None):
2471 """This will raise InvalidDependString if necessary. If trees is
2472 None then self._dynamic_config._filtered_trees is used."""
2474 pkgsettings = self._frozen_config.pkgsettings[root]
2476 trees = self._dynamic_config._filtered_trees
2477 mytrees = trees[root]
2478 atom_graph = digraph()
2480 # Temporarily disable autounmask so that || preferences
2481 # account for masking and USE settings.
2482 _autounmask_backup = self._dynamic_config._autounmask
2483 self._dynamic_config._autounmask = False
2484 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2486 if parent is not None:
2487 trees[root]["parent"] = parent
2488 trees[root]["atom_graph"] = atom_graph
2489 if priority is not None:
2490 trees[root]["priority"] = priority
2491 mycheck = portage.dep_check(depstring, None,
2492 pkgsettings, myuse=myuse,
2493 myroot=root, trees=trees)
2495 self._dynamic_config._autounmask = _autounmask_backup
2496 del mytrees["pkg_use_enabled"]
2497 if parent is not None:
2498 trees[root].pop("parent")
2499 trees[root].pop("atom_graph")
2500 if priority is not None:
2501 trees[root].pop("priority")
2503 raise portage.exception.InvalidDependString(mycheck[1])
2505 selected_atoms = mycheck[1]
2506 elif parent not in atom_graph:
2507 selected_atoms = {parent : mycheck[1]}
2509 # Recursively traversed virtual dependencies, and their
2510 # direct dependencies, are considered to have the same
2511 # depth as direct dependencies.
2512 if parent.depth is None:
2515 virt_depth = parent.depth + 1
2516 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2517 selected_atoms = OrderedDict()
2518 node_stack = [(parent, None, None)]
2519 traversed_nodes = set()
2521 node, node_parent, parent_atom = node_stack.pop()
2522 traversed_nodes.add(node)
2526 if node_parent is parent:
2527 if priority is None:
2528 node_priority = None
2530 node_priority = priority.copy()
2532 # virtuals only have runtime deps
2533 node_priority = self._priority(runtime=True)
2535 k = Dependency(atom=parent_atom,
2536 blocker=parent_atom.blocker, child=node,
2537 depth=virt_depth, parent=node_parent,
2538 priority=node_priority, root=node.root)
2541 selected_atoms[k] = child_atoms
2542 for atom_node in atom_graph.child_nodes(node):
2543 child_atom = atom_node[0]
2544 if id(child_atom) not in chosen_atom_ids:
2546 child_atoms.append(child_atom)
2547 for child_node in atom_graph.child_nodes(atom_node):
2548 if child_node in traversed_nodes:
2550 if not portage.match_from_list(
2551 child_atom, [child_node]):
2552 # Typically this means that the atom
2553 # specifies USE deps that are unsatisfied
2554 # by the selected package. The caller will
2555 # record this as an unsatisfied dependency
2558 node_stack.append((child_node, node, child_atom))
2560 return selected_atoms
2562 def _expand_virt_from_graph(self, root, atom):
2563 if not isinstance(atom, Atom):
2565 graphdb = self._dynamic_config.mydbapi[root]
2566 match = graphdb.match_pkgs(atom)
2571 if not pkg.cpv.startswith("virtual/"):
2575 rdepend = self._select_atoms_from_graph(
2576 pkg.root, pkg.metadata.get("RDEPEND", ""),
2577 myuse=self._pkg_use_enabled(pkg),
2578 parent=pkg, strict=False)
2579 except InvalidDependString as e:
2580 writemsg_level("!!! Invalid RDEPEND in " + \
2581 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2582 (pkg.root, pkg.cpv, e),
2583 noiselevel=-1, level=logging.ERROR)
2587 for atoms in rdepend.values():
2589 if hasattr(atom, "_orig_atom"):
2590 # Ignore virtual atoms since we're only
2591 # interested in expanding the real atoms.
2595 def _get_dep_chain(self, start_node, target_atom=None,
2596 unsatisfied_dependency=False):
2598 Returns a list of (atom, node_type) pairs that represent a dep chain.
2599 If target_atom is None, the first package shown is pkg's parent.
2600 If target_atom is not None the first package shown is pkg.
2601 If unsatisfied_dependency is True, the first parent is select who's
2602 dependency is not satisfied by 'pkg'. This is need for USE changes.
2603 (Does not support target_atom.)
2605 traversed_nodes = set()
2609 all_parents = self._dynamic_config._parent_atoms
2611 if target_atom is not None and isinstance(node, Package):
2612 affecting_use = set()
2613 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2615 affecting_use.update(extract_affecting_use(
2616 node.metadata[dep_str], target_atom))
2617 except InvalidDependString:
2618 if not node.installed:
2620 affecting_use.difference_update(node.use.mask, node.use.force)
2621 pkg_name = _unicode_decode("%s") % (node.cpv,)
2624 for flag in affecting_use:
2625 if flag in self._pkg_use_enabled(node):
2628 usedep.append("-"+flag)
2629 pkg_name += "[%s]" % ",".join(usedep)
2631 dep_chain.append((pkg_name, node.type_name))
2633 while node is not None:
2634 traversed_nodes.add(node)
2636 if isinstance(node, DependencyArg):
2637 if self._dynamic_config.digraph.parent_nodes(node):
2640 node_type = "argument"
2641 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
2643 elif node is not start_node:
2644 for ppkg, patom in all_parents[child]:
2646 atom = patom.unevaluated_atom
2650 for priority in self._dynamic_config.digraph.nodes[node][0][child]:
2651 if priority.buildtime:
2652 dep_strings.add(node.metadata["DEPEND"])
2653 if priority.runtime:
2654 dep_strings.add(node.metadata["RDEPEND"])
2655 if priority.runtime_post:
2656 dep_strings.add(node.metadata["PDEPEND"])
2658 affecting_use = set()
2659 for dep_str in dep_strings:
2660 affecting_use.update(extract_affecting_use(dep_str, atom))
2662 #Don't show flags as 'affecting' if the user can't change them,
2663 affecting_use.difference_update(node.use.mask, \
2666 pkg_name = _unicode_decode("%s") % (node.cpv,)
2669 for flag in affecting_use:
2670 if flag in self._pkg_use_enabled(node):
2673 usedep.append("-"+flag)
2674 pkg_name += "[%s]" % ",".join(usedep)
2676 dep_chain.append((pkg_name, node.type_name))
2678 if node not in self._dynamic_config.digraph:
2679 # The parent is not in the graph due to backtracking.
2682 # When traversing to parents, prefer arguments over packages
2683 # since arguments are root nodes. Never traverse the same
2684 # package twice, in order to prevent an infinite loop.
2686 selected_parent = None
2689 parent_unsatisfied = None
2691 for parent in self._dynamic_config.digraph.parent_nodes(node):
2692 if parent in traversed_nodes:
2694 if isinstance(parent, DependencyArg):
2697 if isinstance(parent, Package) and \
2698 parent.operation == "merge":
2699 parent_merge = parent
2700 if unsatisfied_dependency and node is start_node:
2701 # Make sure that pkg doesn't satisfy parent's dependency.
2702 # This ensures that we select the correct parent for use
2704 for ppkg, atom in all_parents[start_node]:
2706 atom_set = InternalPackageSet(initial_atoms=(atom,))
2707 if not atom_set.findAtomForPackage(start_node):
2708 parent_unsatisfied = parent
2711 selected_parent = parent
2713 if parent_unsatisfied is not None:
2714 selected_parent = parent_unsatisfied
2715 elif parent_merge is not None:
2716 # Prefer parent in the merge list (bug #354747).
2717 selected_parent = parent_merge
2718 elif parent_arg is not None:
2719 if self._dynamic_config.digraph.parent_nodes(parent_arg):
2720 selected_parent = parent_arg
2723 (_unicode_decode("%s") % (parent_arg,), "argument"))
2724 selected_parent = None
2726 node = selected_parent
2729 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
2730 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2732 for node, node_type in dep_chain:
2733 if node_type == "argument":
2734 display_list.append("required by %s (argument)" % node)
2736 display_list.append("required by %s" % node)
2738 msg = "#" + ", ".join(display_list) + "\n"
2742 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2743 check_backtrack=False):
2745 When check_backtrack=True, no output is produced and
2746 the method either returns or raises _backtrack_mask if
2747 a matching package has been masked by backtracking.
2749 backtrack_mask = False
2750 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
2752 xinfo = '"%s"' % atom.unevaluated_atom
2755 if isinstance(myparent, AtomArg):
2756 xinfo = _unicode_decode('"%s"') % (myparent,)
2757 # Discard null/ from failed cpv_expand category expansion.
2758 xinfo = xinfo.replace("null/", "")
2760 xinfo = "%s for %s" % (xinfo, root)
2761 masked_packages = []
2763 missing_use_adjustable = set()
2764 required_use_unsatisfied = []
2765 masked_pkg_instances = set()
2766 missing_licenses = []
2767 have_eapi_mask = False
2768 pkgsettings = self._frozen_config.pkgsettings[root]
2769 root_config = self._frozen_config.roots[root]
2770 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2771 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2772 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2773 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2774 for db, pkg_type, built, installed, db_keys in dbs:
2778 if hasattr(db, "xmatch"):
2779 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2781 cpv_list = db.match(atom.without_use)
2783 if atom.repo is None and hasattr(db, "getRepositories"):
2784 repo_list = db.getRepositories()
2786 repo_list = [atom.repo]
2790 for cpv in cpv_list:
2791 for repo in repo_list:
2792 if not db.cpv_exists(cpv, myrepo=repo):
2795 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2796 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2798 if metadata is not None:
2800 repo = metadata.get('repository')
2801 pkg = self._pkg(cpv, pkg_type, root_config,
2802 installed=installed, myrepo=repo)
2803 if not atom_set.findAtomForPackage(pkg,
2804 modified_use=self._pkg_use_enabled(pkg)):
2806 # pkg.metadata contains calculated USE for ebuilds,
2807 # required later for getMissingLicenses.
2808 metadata = pkg.metadata
2809 if pkg in self._dynamic_config._runtime_pkg_mask:
2810 backtrack_reasons = \
2811 self._dynamic_config._runtime_pkg_mask[pkg]
2812 mreasons.append('backtracking: %s' % \
2813 ', '.join(sorted(backtrack_reasons)))
2814 backtrack_mask = True
2815 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2816 modified_use=self._pkg_use_enabled(pkg)):
2817 mreasons = ["exclude option"]
2819 masked_pkg_instances.add(pkg)
2820 if atom.unevaluated_atom.use:
2822 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2823 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2824 missing_use.append(pkg)
2828 writemsg("violated_conditionals raised " + \
2829 "InvalidAtom: '%s' parent: %s" % \
2830 (atom, myparent), noiselevel=-1)
2832 if not mreasons and \
2834 pkg.metadata["REQUIRED_USE"] and \
2835 eapi_has_required_use(pkg.metadata["EAPI"]):
2836 if not check_required_use(
2837 pkg.metadata["REQUIRED_USE"],
2838 self._pkg_use_enabled(pkg),
2839 pkg.iuse.is_valid_flag):
2840 required_use_unsatisfied.append(pkg)
2842 root_slot = (pkg.root, pkg.slot_atom)
2843 if pkg.built and root_slot in self._rebuild.rebuild_list:
2844 mreasons = ["need to rebuild from source"]
2845 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
2846 mreasons = ["need to rebuild from source"]
2847 elif pkg.built and not mreasons:
2848 mreasons = ["use flag configuration mismatch"]
2849 masked_packages.append(
2850 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
2854 raise self._backtrack_mask()
2858 missing_use_reasons = []
2859 missing_iuse_reasons = []
2860 for pkg in missing_use:
2861 use = self._pkg_use_enabled(pkg)
2863 #Use the unevaluated atom here, because some flags might have gone
2864 #lost during evaluation.
2865 required_flags = atom.unevaluated_atom.use.required
2866 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2870 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2871 missing_iuse_reasons.append((pkg, mreasons))
2873 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2874 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2876 untouchable_flags = \
2877 frozenset(chain(pkg.use.mask, pkg.use.force))
2878 if untouchable_flags.intersection(
2879 chain(need_enable, need_disable)):
2882 missing_use_adjustable.add(pkg)
2883 required_use = pkg.metadata["REQUIRED_USE"]
2884 required_use_warning = ""
2886 old_use = self._pkg_use_enabled(pkg)
2887 new_use = set(self._pkg_use_enabled(pkg))
2888 for flag in need_enable:
2890 for flag in need_disable:
2891 new_use.discard(flag)
2892 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2893 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2894 required_use_warning = ", this change violates use flag constraints " + \
2895 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2897 if need_enable or need_disable:
2899 changes.extend(colorize("red", "+" + x) \
2900 for x in need_enable)
2901 changes.extend(colorize("blue", "-" + x) \
2902 for x in need_disable)
2903 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2904 missing_use_reasons.append((pkg, mreasons))
2906 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2907 # Lets see if the violated use deps are conditional.
2908 # If so, suggest to change them on the parent.
2910 # If the child package is masked then a change to
2911 # parent USE is not a valid solution (a normal mask
2912 # message should be displayed instead).
2913 if pkg in masked_pkg_instances:
2917 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2918 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2919 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2920 #all violated use deps are conditional
2922 conditional = violated_atom.use.conditional
2923 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2924 conditional.enabled, conditional.disabled))
2926 untouchable_flags = \
2927 frozenset(chain(myparent.use.mask, myparent.use.force))
2928 if untouchable_flags.intersection(involved_flags):
2931 required_use = myparent.metadata["REQUIRED_USE"]
2932 required_use_warning = ""
2934 old_use = self._pkg_use_enabled(myparent)
2935 new_use = set(self._pkg_use_enabled(myparent))
2936 for flag in involved_flags:
2938 new_use.discard(flag)
2941 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2942 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2943 required_use_warning = ", this change violates use flag constraints " + \
2944 "defined by %s: '%s'" % (myparent.cpv, \
2945 human_readable_required_use(required_use))
2947 for flag in involved_flags:
2948 if flag in self._pkg_use_enabled(myparent):
2949 changes.append(colorize("blue", "-" + flag))
2951 changes.append(colorize("red", "+" + flag))
2952 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2953 if (myparent, mreasons) not in missing_use_reasons:
2954 missing_use_reasons.append((myparent, mreasons))
2956 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2957 in missing_use_reasons if pkg not in masked_pkg_instances]
2959 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2960 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2962 show_missing_use = False
2963 if unmasked_use_reasons:
2964 # Only show the latest version.
2965 show_missing_use = []
2967 parent_reason = None
2968 for pkg, mreasons in unmasked_use_reasons:
2970 if parent_reason is None:
2971 #This happens if a use change on the parent
2972 #leads to a satisfied conditional use dep.
2973 parent_reason = (pkg, mreasons)
2974 elif pkg_reason is None:
2975 #Don't rely on the first pkg in unmasked_use_reasons,
2976 #being the highest version of the dependency.
2977 pkg_reason = (pkg, mreasons)
2979 show_missing_use.append(pkg_reason)
2981 show_missing_use.append(parent_reason)
2983 elif unmasked_iuse_reasons:
2984 masked_with_iuse = False
2985 for pkg in masked_pkg_instances:
2986 #Use atom.unevaluated here, because some flags might have gone
2987 #lost during evaluation.
2988 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2989 # Package(s) with required IUSE are masked,
2990 # so display a normal masking message.
2991 masked_with_iuse = True
2993 if not masked_with_iuse:
2994 show_missing_use = unmasked_iuse_reasons
2996 if required_use_unsatisfied:
2997 # If there's a higher unmasked version in missing_use_adjustable
2998 # then we want to show that instead.
2999 for pkg in missing_use_adjustable:
3000 if pkg not in masked_pkg_instances and \
3001 pkg > required_use_unsatisfied[0]:
3002 required_use_unsatisfied = False
3007 if required_use_unsatisfied:
3008 # We have an unmasked package that only requires USE adjustment
3009 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3010 # that the user wants the latest version, so only the first
3011 # instance is displayed.
3012 pkg = required_use_unsatisfied[0]
3013 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3014 writemsg_stdout("\n!!! " + \
3015 colorize("BAD", "The ebuild selected to satisfy ") + \
3016 colorize("INFORM", xinfo) + \
3017 colorize("BAD", " has unmet requirements.") + "\n",
3019 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3020 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
3022 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
3023 "are unsatisfied:\n", noiselevel=-1)
3024 reduced_noise = check_required_use(
3025 pkg.metadata["REQUIRED_USE"],
3026 self._pkg_use_enabled(pkg),
3027 pkg.iuse.is_valid_flag).tounicode()
3028 writemsg_stdout(" %s\n" % \
3029 human_readable_required_use(reduced_noise),
3031 normalized_required_use = \
3032 " ".join(pkg.metadata["REQUIRED_USE"].split())
3033 if reduced_noise != normalized_required_use:
3034 writemsg_stdout("\n The above constraints " + \
3035 "are a subset of the following complete expression:\n",
3037 writemsg_stdout(" %s\n" % \
3038 human_readable_required_use(normalized_required_use),
3040 writemsg_stdout("\n", noiselevel=-1)
3042 elif show_missing_use:
3043 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3044 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3045 for pkg, mreasons in show_missing_use:
3046 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3048 elif masked_packages:
3049 writemsg_stdout("\n!!! " + \
3050 colorize("BAD", "All ebuilds that could satisfy ") + \
3051 colorize("INFORM", xinfo) + \
3052 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3053 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3054 have_eapi_mask = show_masked_packages(masked_packages)
3056 writemsg_stdout("\n", noiselevel=-1)
3057 msg = ("The current version of portage supports " + \
3058 "EAPI '%s'. You must upgrade to a newer version" + \
3059 " of portage before EAPI masked packages can" + \
3060 " be installed.") % portage.const.EAPI
3061 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3062 writemsg_stdout("\n", noiselevel=-1)
3066 if not atom.cp.startswith("null/"):
3067 for pkg in self._iter_match_pkgs_any(
3068 root_config, Atom(atom.cp)):
3072 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3073 if isinstance(myparent, AtomArg) and \
3075 self._frozen_config.myopts.get(
3076 "--misspell-suggestions", "y") != "n":
3077 cp = myparent.atom.cp.lower()
3078 cat, pkg = portage.catsplit(cp)
3082 writemsg_stdout("\nemerge: searching for similar names..."
3086 all_cp.update(vardb.cp_all())
3087 all_cp.update(portdb.cp_all())
3088 if "--usepkg" in self._frozen_config.myopts:
3089 all_cp.update(bindb.cp_all())
3092 for cp_orig in all_cp:
3093 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3094 all_cp = set(orig_cp_map)
3097 matches = difflib.get_close_matches(cp, all_cp)
3100 for other_cp in all_cp:
3101 other_pkg = portage.catsplit(other_cp)[1]
3102 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3103 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3105 for pkg_match in pkg_matches:
3106 matches.extend(pkg_to_cp[pkg_match])
3108 matches_orig_case = []
3110 matches_orig_case.extend(orig_cp_map[cp])
3111 matches = matches_orig_case
3113 if len(matches) == 1:
3114 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
3116 elif len(matches) > 1:
3118 "\nemerge: Maybe you meant any of these: %s?\n" % \
3119 (", ".join(matches),), noiselevel=-1)
3121 # Generally, this would only happen if
3122 # all dbapis are empty.
3123 writemsg_stdout(" nothing similar found.\n"
3126 if not isinstance(myparent, AtomArg):
3127 # It's redundant to show parent for AtomArg since
3128 # it's the same as 'xinfo' displayed above.
3129 dep_chain = self._get_dep_chain(myparent, atom)
3130 for node, node_type in dep_chain:
3131 msg.append('(dependency required by "%s" [%s])' % \
3132 (colorize('INFORM', _unicode_decode("%s") % \
3133 (node)), node_type))
3136 writemsg_stdout("\n".join(msg), noiselevel=-1)
3137 writemsg_stdout("\n", noiselevel=-1)
3141 writemsg_stdout("\n", noiselevel=-1)
# Generator: walk every configured package database for this root (in the
# order listed in _dynamic_config._filtered_trees[root]["dbs"]) and match
# `atom` against each db's package type via _iter_match_pkgs.
# NOTE(review): this excerpt appears truncated -- the inner loop's body
# (presumably `yield pkg`) is not visible here; confirm against the
# complete file.
3143 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
# db, built, installed and db_keys from the tuple are unused here; only
# pkg_type is forwarded to the per-type matcher.
3144 for db, pkg_type, built, installed, db_keys in \
3145 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3146 for pkg in self._iter_match_pkgs(root_config,
3147 pkg_type, atom, onlydeps=onlydeps):
3150 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
# Docstring text (the triple-quote delimiters are not visible in this
# excerpt):
3152 Iterate over Package instances of pkg_type matching the given atom.
3153 This does not check visibility and it also does not match USE for
3154 unbuilt ebuilds since USE are lazily calculated after visibility
3155 checks (to avoid the expense when possible).
# Resolve the dbapi backing the requested package type for this root
# (self.pkg_tree_map maps pkg_type -> tree name).
3158 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3160 if hasattr(db, "xmatch"):
3161 # For portdbapi we match only against the cpv, in order
3162 # to bypass unnecessary cache access for things like IUSE
3163 # and SLOT. Later, we cache the metadata in a Package
3164 # instance, and use that for further matching. This
3165 # optimization is especially relevant since
3166 # pordbapi.aux_get() does not cache calls that have
3167 # myrepo or mytree arguments.
3168 cpv_list = db.xmatch("match-all-cpv-only", atom)
# NOTE(review): the `else:` introducing this fallback branch is not
# visible in this excerpt; confirm against the complete file.
3170 cpv_list = db.match(atom)
3172 # USE=multislot can make an installed package appear as if
3173 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3174 # won't do any good as long as USE=multislot is enabled since
3175 # the newly built package still won't have the expected slot.
3176 # Therefore, assume that such SLOT dependencies are already
3177 # satisfied rather than forcing a rebuild.
3178 installed = pkg_type == 'installed'
3179 if installed and not cpv_list and atom.slot:
# Fallback: no slot-qualified match among installed packages -- scan
# matches of the bare category/package and check whether any other db
# can provide the requested slot.
3180 for cpv in db.match(atom.cp):
3181 slot_available = False
3182 for other_db, other_type, other_built, \
3183 other_installed, other_keys in \
3184 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
# NOTE(review): the condition line(s) comparing atom.slot against the
# other db's SLOT are partially missing from this excerpt.
3187 other_db.aux_get(cpv, ["SLOT"])[0]:
3188 slot_available = True
3192 if not slot_available:
3194 inst_pkg = self._pkg(cpv, "installed",
3195 root_config, installed=installed, myrepo = atom.repo)
3196 # Remove the slot from the atom and verify that
3197 # the package matches the resulting atom.
3198 if portage.match_from_list(
3199 atom.without_slot, [inst_pkg]):
# Wrap the atom in a set so findAtomForPackage (with PROVIDE support)
# can be used for the final, metadata-aware match below.
3204 atom_set = InternalPackageSet(initial_atoms=(atom,),
# When the atom does not pin a repo, try every repo the db knows about;
# otherwise restrict to the atom's repo.
3206 if atom.repo is None and hasattr(db, "getRepositories"):
3207 repo_list = db.getRepositories()
3209 repo_list = [atom.repo]
3213 for cpv in cpv_list:
3214 for repo in repo_list:
# Instantiate (or fetch the cached) Package for this cpv/repo; skip it
# if the ebuild has vanished from the repo in the meantime.
3217 pkg = self._pkg(cpv, pkg_type, root_config,
3218 installed=installed, onlydeps=onlydeps, myrepo=repo)
3219 except portage.exception.PackageNotFound:
3222 # A cpv can be returned from dbapi.match() as an
3223 # old-style virtual match even in cases when the
3224 # package does not actually PROVIDE the virtual.
3225 # Filter out any such false matches here.
3227 # Make sure that cpv from the current repo satisfies the atom.
3228 # This might not be the case if there are several repos with
3229 # the same cpv, but different metadata keys, like SLOT.
3230 # Also, for portdbapi, parts of the match that require
3231 # metadata access are deferred until we have cached the
3232 # metadata in a Package instance.
3233 if not atom_set.findAtomForPackage(pkg,
3234 modified_use=self._pkg_use_enabled(pkg)):
# Memoizing wrapper around _select_pkg_highest_available_imp, keyed by
# (root, atom, onlydeps). On a cache hit it refreshes the cached entry
# when the previously selected package has since been added to the
# dependency graph (so `existing` reflects the graph's slot map).
# NOTE(review): the cache-hit unpack of `ret` into (pkg, existing) and
# the return statements are not visible in this excerpt.
3238 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3239 cache_key = (root, atom, onlydeps)
3240 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
3243 if pkg and not existing:
3244 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3245 if existing and existing == pkg:
3246 # Update the cache to reflect that the
3247 # package has been added to the graph.
3249 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute the selection and memoize it.
3251 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3252 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3255 settings = pkg.root_config.settings
3256 if self._pkg_visibility_check(pkg) and \
3257 not (pkg.installed and pkg.masks):
3258 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
3261 def _want_installed_pkg(self, pkg):
# Docstring text (delimiters not visible in this excerpt):
3263 Given an installed package returned from select_pkg, return
3264 True if the user has not explicitly requested for this package
3265 to be replaced (typically via an atom on the command line).
# Only when "selective" is disabled and the package lives in the target
# root can an argument atom force replacement; probe for a matching
# argument atom with the _iter_atoms_for_pkg generator.
3267 if "selective" not in self._dynamic_config.myparams and \
3268 pkg.root == self._frozen_config.target_root:
3270 next(self._iter_atoms_for_pkg(pkg))
# StopIteration: no argument atom matches this package.
3271 except StopIteration:
# Treat corrupt metadata as "no explicit request" rather than crashing.
# NOTE(review): the return statements of this method are not visible in
# this excerpt.
3273 except portage.exception.InvalidDependString:
# Select the highest available package for (root, atom), retrying with
# progressively more permissive autounmask settings (USE changes first,
# then unstable keywords / license changes, then unmasks) when the
# default selection is unusable.
3279 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3280 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the unmodified selection so we can fall back to it below.
3282 default_selection = (pkg, existing)
3284 if self._dynamic_config._autounmask is True:
# NOTE(review): several guard lines of this autounmask retry loop are
# not visible in this excerpt (e.g. the conditions that skip the retry
# and the loop's break statements).
3285 if pkg is not None and \
3287 not self._want_installed_pkg(pkg):
# First pass allows only USE changes; second pass also allows keyword
# and license changes.
3290 for only_use_changes in True, False:
3294 for allow_unmasks in (False, True):
# "Only USE changes" and "allow unmasks" are mutually exclusive passes.
3295 if only_use_changes and allow_unmasks:
3302 self._wrapped_select_pkg_highest_available_imp(
3303 root, atom, onlydeps=onlydeps,
3304 allow_use_changes=True,
3305 allow_unstable_keywords=(not only_use_changes),
3306 allow_license_changes=(not only_use_changes),
3307 allow_unmasks=allow_unmasks)
3309 if pkg is not None and \
3311 not self._want_installed_pkg(pkg):
3314 if self._dynamic_config._need_restart:
3318 # This ensures that we can fall back to an installed package
3319 # that may have been rejected in the autounmask path above.
3320 return default_selection
3322 return pkg, existing
# Decide whether `pkg` may be used, honoring autounmask: classify each
# masking reason, drop reasons already scheduled to be lifted, reject if
# a needed change is not allowed by the flags, and otherwise record the
# needed keyword/mask/license changes for backtracking.
# NOTE(review): the early `return` statements of this method are not
# visible in this excerpt.
3324 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
3329 if pkg in self._dynamic_config.digraph:
3330 # Sometimes we need to temporarily disable
3331 # dynamic_config._autounmask, but for overall
3332 # consistency in dependency resolution, in any
3333 # case we want to respect autounmask visibity
3334 # for packages that have already been added to
3335 # the dependency graph.
3338 if not self._dynamic_config._autounmask:
3341 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3342 root_config = self._frozen_config.roots[pkg.root]
3343 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classification flags for the mask reasons found above.
3345 masked_by_unstable_keywords = False
3346 masked_by_missing_keywords = False
3347 missing_licenses = None
3348 masked_by_something_else = False
3349 masked_by_p_mask = False
# Sort every mask reason into one of the buckets above via its
# unmask_hint; anything unrecognized makes the package unusable.
3351 for reason in mreasons:
3352 hint = reason.unmask_hint
3355 masked_by_something_else = True
3356 elif hint.key == "unstable keyword":
3357 masked_by_unstable_keywords = True
# "**" means the package has no keyword for this arch at all.
3358 if hint.value == "**":
3359 masked_by_missing_keywords = True
3360 elif hint.key == "p_mask":
3361 masked_by_p_mask = True
3362 elif hint.key == "license":
3363 missing_licenses = hint.value
3365 masked_by_something_else = True
3367 if masked_by_something_else:
# Drop mask reasons that earlier autounmask decisions already lifted.
3370 if pkg in self._dynamic_config._needed_unstable_keywords:
3371 #If the package is already keyworded, remove the mask.
3372 masked_by_unstable_keywords = False
3373 masked_by_missing_keywords = False
3375 if pkg in self._dynamic_config._needed_p_mask_changes:
3376 #If the package is already keyworded, remove the mask.
3377 masked_by_p_mask = False
3379 if missing_licenses:
3380 #If the needed licenses are already unmasked, remove the mask.
3381 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
3383 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
3384 #Package has already been unmasked.
3387 #We treat missing keywords in the same way as masks.
3388 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
3389 (masked_by_missing_keywords and not allow_unmasks) or \
3390 (masked_by_p_mask and not allow_unmasks) or \
3391 (missing_licenses and not allow_license_changes):
3392 #We are not allowed to do the needed changes.
# Record each needed change both in _dynamic_config (for this run) and
# in _backtrack_infos["config"] (so backtracking can replay it).
3395 if masked_by_unstable_keywords:
3396 self._dynamic_config._needed_unstable_keywords.add(pkg)
3397 backtrack_infos = self._dynamic_config._backtrack_infos
3398 backtrack_infos.setdefault("config", {})
3399 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
3400 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
3402 if masked_by_p_mask:
3403 self._dynamic_config._needed_p_mask_changes.add(pkg)
3404 backtrack_infos = self._dynamic_config._backtrack_infos
3405 backtrack_infos.setdefault("config", {})
3406 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
3407 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
3409 if missing_licenses:
3410 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
3411 backtrack_infos = self._dynamic_config._backtrack_infos
3412 backtrack_infos.setdefault("config", {})
3413 backtrack_infos["config"].setdefault("needed_license_changes", set())
3414 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
3418 def _pkg_use_enabled(self, pkg, target_use=None):
# Docstring text (delimiters not visible in this excerpt):
3420 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
3421 If target_use is given, the need changes are computed to make the package useable.
3422 Example: target_use = { "foo": True, "bar": False }
3423 The flags target_use must be in the pkg's IUSE.
3425 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query mode: report the effective USE set without computing changes.
3427 if target_use is None:
3428 if needed_use_config_change is None:
3429 return pkg.use.enabled
3431 return needed_use_config_change[0]
# Change-computation mode: start from any previously recorded changes
# so repeated requests accumulate into one (new_use, new_changes) pair.
3433 if needed_use_config_change is not None:
3434 old_use = needed_use_config_change[0]
3436 old_changes = needed_use_config_change[1]
3437 new_changes = old_changes.copy()
3439 old_use = pkg.use.enabled
# For each requested flag state, record the flip relative to old_use.
# NOTE(review): several lines of this loop (e.g. the new_use set build
# and the disable branch header) are not visible in this excerpt.
3444 for flag, state in target_use.items():
3446 if flag not in old_use:
# A previously recorded opposite change is dropped, not overwritten.
3447 if new_changes.get(flag) == False:
3449 new_changes[flag] = True
3453 if new_changes.get(flag) == True:
3455 new_changes[flag] = False
# Flags not mentioned in target_use keep their old state.
3456 new_use.update(old_use.difference(target_use))
# Helper: a USE flip forces a restart only when it alters this pkg's
# dep/license strings or a parent's USE-conditional dep on it.
3458 def want_restart_for_use_change(pkg, new_use):
3459 if pkg not in self._dynamic_config.digraph.nodes:
3462 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
3463 dep = pkg.metadata[key]
3464 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3465 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3467 if old_val != new_val:
3470 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3471 if not parent_atoms:
3474 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
3475 for ppkg, atom in parent_atoms:
3476 if not atom.use or \
3477 not atom.use.required.intersection(changes):
3484 if new_changes != old_changes:
3485 #Don't do the change if it violates REQUIRED_USE.
3486 required_use = pkg.metadata["REQUIRED_USE"]
3487 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3488 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# Masked or forced flags cannot be changed by the user; refuse.
3491 if pkg.use.mask.intersection(new_changes) or \
3492 pkg.use.force.intersection(new_changes):
# Persist the accepted change set and mirror it into the backtracking
# config so it can be replayed on restart.
3495 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
3496 backtrack_infos = self._dynamic_config._backtrack_infos
3497 backtrack_infos.setdefault("config", {})
3498 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
3499 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
3500 if want_restart_for_use_change(pkg, new_use):
3501 self._dynamic_config._need_restart = True
3504 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
3505 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
3506 root_config = self._frozen_config.roots[root]
3507 pkgsettings = self._frozen_config.pkgsettings[root]
3508 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3509 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3510 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3511 # List of acceptable packages, ordered by type preference.
3512 matched_packages = []
3513 matched_pkgs_ignore_use = []
3514 highest_version = None
3515 if not isinstance(atom, portage.dep.Atom):
3516 atom = portage.dep.Atom(atom)
3518 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
3519 existing_node = None
3521 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3522 usepkg = "--usepkg" in self._frozen_config.myopts
3523 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3524 empty = "empty" in self._dynamic_config.myparams
3525 selective = "selective" in self._dynamic_config.myparams
3527 noreplace = "--noreplace" in self._frozen_config.myopts
3528 avoid_update = "--update" not in self._frozen_config.myopts
3529 dont_miss_updates = "--update" in self._frozen_config.myopts
3530 use_ebuild_visibility = self._frozen_config.myopts.get(
3531 '--use-ebuild-visibility', 'n') != 'n'
3532 reinstall_atoms = self._frozen_config.reinstall_atoms
3533 usepkg_exclude = self._frozen_config.usepkg_exclude
3534 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
3536 # Behavior of the "selective" parameter depends on
3537 # whether or not a package matches an argument atom.
3538 # If an installed package provides an old-style
3539 # virtual that is no longer provided by an available
3540 # package, the installed package may match an argument
3541 # atom even though none of the available packages do.
3542 # Therefore, "selective" logic does not consider
3543 # whether or not an installed package matches an
3544 # argument atom. It only considers whether or not
3545 # available packages match argument atoms, which is
3546 # represented by the found_available_arg flag.
3547 found_available_arg = False
3548 packages_with_invalid_use_config = []
3549 for find_existing_node in True, False:
3552 for db, pkg_type, built, installed, db_keys in dbs:
3555 if installed and not find_existing_node:
3556 want_reinstall = reinstall or empty or \
3557 (found_available_arg and not selective)
3558 if want_reinstall and matched_packages:
3561 # Ignore USE deps for the initial match since we want to
3562 # ensure that updates aren't missed solely due to the user's
3563 # USE configuration.
3564 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3566 if pkg in self._dynamic_config._runtime_pkg_mask:
3567 # The package has been masked by the backtracking logic
3569 root_slot = (pkg.root, pkg.slot_atom)
3570 if pkg.built and root_slot in self._rebuild.rebuild_list:
3572 if (pkg.installed and
3573 root_slot in self._rebuild.reinstall_list):
3576 if not pkg.installed and \
3577 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3578 modified_use=self._pkg_use_enabled(pkg)):
3581 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
3582 modified_use=self._pkg_use_enabled(pkg)):
3585 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
3586 modified_use=self._pkg_use_enabled(pkg))
3588 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
3589 (not pkg.installed or dont_miss_updates):
3590 # Check if a higher version was rejected due to user
3591 # USE configuration. The packages_with_invalid_use_config
3592 # list only contains unbuilt ebuilds since USE can't
3593 # be changed for built packages.
3594 higher_version_rejected = False
3595 repo_priority = pkg.repo_priority
3596 for rejected in packages_with_invalid_use_config:
3597 if rejected.cp != pkg.cp:
3600 higher_version_rejected = True
3602 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
3603 # If version is identical then compare
3604 # repo priority (see bug #350254).
3605 rej_repo_priority = rejected.repo_priority
3606 if rej_repo_priority is not None and \
3607 (repo_priority is None or
3608 rej_repo_priority > repo_priority):
3609 higher_version_rejected = True
3611 if higher_version_rejected:
3615 # Make --noreplace take precedence over --newuse.
3616 if not pkg.installed and noreplace and \
3617 cpv in vardb.match(atom):
3618 inst_pkg = self._pkg(pkg.cpv, "installed",
3619 root_config, installed=True)
3620 if inst_pkg.visible:
3621 # If the installed version is masked, it may
3622 # be necessary to look at lower versions,
3623 # in case there is a visible downgrade.
3625 reinstall_for_flags = None
3627 if not pkg.installed or \
3628 (matched_packages and not avoid_update):
3629 # Only enforce visibility on installed packages
3630 # if there is at least one other visible package
3631 # available. By filtering installed masked packages
3632 # here, packages that have been masked since they
3633 # were installed can be automatically downgraded
3634 # to an unmasked version. NOTE: This code needs to
3635 # be consistent with masking behavior inside
3636 # _dep_check_composite_db, in order to prevent
3637 # incorrect choices in || deps like bug #351828.
3639 if not self._pkg_visibility_check(pkg, \
3640 allow_unstable_keywords=allow_unstable_keywords,
3641 allow_license_changes=allow_license_changes,
3642 allow_unmasks=allow_unmasks):
3645 # Enable upgrade or downgrade to a version
3646 # with visible KEYWORDS when the installed
3647 # version is masked by KEYWORDS, but never
3648 # reinstall the same exact version only due
3649 # to a KEYWORDS mask. See bug #252167.
3651 if pkg.type_name != "ebuild" and matched_packages:
3652 # Don't re-install a binary package that is
3653 # identical to the currently installed package
3654 # (see bug #354441).
3655 identical_binary = False
3656 if usepkg and pkg.installed:
3657 for selected_pkg in matched_packages:
3658 if selected_pkg.type_name == "binary" and \
3659 selected_pkg.cpv == pkg.cpv and \
3660 selected_pkg.metadata.get('BUILD_TIME') == \
3661 pkg.metadata.get('BUILD_TIME'):
3662 identical_binary = True
3665 if not identical_binary:
3666 # If the ebuild no longer exists or it's
3667 # keywords have been dropped, reject built
3668 # instances (installed or binary).
3669 # If --usepkgonly is enabled, assume that
3670 # the ebuild status should be ignored.
3671 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
3672 if pkg.installed and pkg.masks:
3677 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
3678 except portage.exception.PackageNotFound:
3679 pkg_eb_visible = False
3680 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3681 "ebuild", Atom("=%s" % (pkg.cpv,))):
3682 if self._pkg_visibility_check(pkg_eb, \
3683 allow_unstable_keywords=allow_unstable_keywords,
3684 allow_license_changes=allow_license_changes,
3685 allow_unmasks=allow_unmasks):
3686 pkg_eb_visible = True
3688 if not pkg_eb_visible:
3691 if not self._pkg_visibility_check(pkg_eb, \
3692 allow_unstable_keywords=allow_unstable_keywords,
3693 allow_license_changes=allow_license_changes,
3694 allow_unmasks=allow_unmasks):
3697 # Calculation of USE for unbuilt ebuilds is relatively
3698 # expensive, so it is only performed lazily, after the
3699 # above visibility checks are complete.
3702 if root == self._frozen_config.target_root:
3704 myarg = next(self._iter_atoms_for_pkg(pkg))
3705 except StopIteration:
3707 except portage.exception.InvalidDependString:
3709 # masked by corruption
3711 if not installed and myarg:
3712 found_available_arg = True
3714 if atom.unevaluated_atom.use:
3715 #Make sure we don't miss a 'missing IUSE'.
3716 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3717 # Don't add this to packages_with_invalid_use_config
3718 # since IUSE cannot be adjusted by the user.
3723 matched_pkgs_ignore_use.append(pkg)
3724 if allow_use_changes:
3726 for flag in atom.use.enabled:
3727 target_use[flag] = True
3728 for flag in atom.use.disabled:
3729 target_use[flag] = False
3730 use = self._pkg_use_enabled(pkg, target_use)
3732 use = self._pkg_use_enabled(pkg)
3735 can_adjust_use = not pkg.built
3736 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3737 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3739 if atom.use.enabled:
3740 if atom.use.enabled.intersection(missing_disabled):
3742 can_adjust_use = False
3743 need_enabled = atom.use.enabled.difference(use)
3745 need_enabled = need_enabled.difference(missing_enabled)
3749 if pkg.use.mask.intersection(need_enabled):
3750 can_adjust_use = False
3752 if atom.use.disabled:
3753 if atom.use.disabled.intersection(missing_enabled):
3755 can_adjust_use = False
3756 need_disabled = atom.use.disabled.intersection(use)
3758 need_disabled = need_disabled.difference(missing_disabled)
3762 if pkg.use.force.difference(
3763 pkg.use.mask).intersection(need_disabled):
3764 can_adjust_use = False
3768 # Above we must ensure that this package has
3769 # absolutely no use.force, use.mask, or IUSE
3770 # issues that the user typically can't make
3771 # adjustments to solve (see bug #345979).
3772 # FIXME: Conditional USE deps complicate
3773 # issues. This code currently excludes cases
3774 # in which the user can adjust the parent
3775 # package's USE in order to satisfy the dep.
3776 packages_with_invalid_use_config.append(pkg)
3779 if pkg.cp == atom_cp:
3780 if highest_version is None:
3781 highest_version = pkg
3782 elif pkg > highest_version:
3783 highest_version = pkg
3784 # At this point, we've found the highest visible
3785 # match from the current repo. Any lower versions
3786 # from this repo are ignored, so this so the loop
3787 # will always end with a break statement below
3789 if find_existing_node:
3790 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3793 # Use PackageSet.findAtomForPackage()
3794 # for PROVIDE support.
3795 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3796 if highest_version and \
3797 e_pkg.cp == atom_cp and \
3798 e_pkg < highest_version and \
3799 e_pkg.slot_atom != highest_version.slot_atom:
3800 # There is a higher version available in a
3801 # different slot, so this existing node is
3805 matched_packages.append(e_pkg)
3806 existing_node = e_pkg
3808 # Compare built package to current config and
3809 # reject the built package if necessary.
3810 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
3811 ("--newuse" in self._frozen_config.myopts or \
3812 "--reinstall" in self._frozen_config.myopts or \
3813 "--binpkg-respect-use" in self._frozen_config.myopts):
3814 iuses = pkg.iuse.all
3815 old_use = self._pkg_use_enabled(pkg)
3817 pkgsettings.setcpv(myeb)
3819 pkgsettings.setcpv(pkg)
3820 now_use = pkgsettings["PORTAGE_USE"].split()
3821 forced_flags = set()
3822 forced_flags.update(pkgsettings.useforce)
3823 forced_flags.update(pkgsettings.usemask)
3825 if myeb and not usepkgonly and not useoldpkg:
3826 cur_iuse = myeb.iuse.all
3827 if self._reinstall_for_flags(forced_flags,
3831 # Compare current config to installed package
3832 # and do not reinstall if possible.
3833 if not installed and not useoldpkg and \
3834 ("--newuse" in self._frozen_config.myopts or \
3835 "--reinstall" in self._frozen_config.myopts) and \
3836 cpv in vardb.match(atom):
3837 forced_flags = set()
3838 forced_flags.update(pkg.use.force)
3839 forced_flags.update(pkg.use.mask)
3840 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3841 old_use = inst_pkg.use.enabled
3842 old_iuse = inst_pkg.iuse.all
3843 cur_use = self._pkg_use_enabled(pkg)
3844 cur_iuse = pkg.iuse.all
3845 reinstall_for_flags = \
3846 self._reinstall_for_flags(
3847 forced_flags, old_use, old_iuse,
3849 if reinstall_for_flags:
3851 if reinstall_atoms.findAtomForPackage(pkg, \
3852 modified_use=self._pkg_use_enabled(pkg)):
3857 matched_oldpkg.append(pkg)
3858 matched_packages.append(pkg)
3859 if reinstall_for_flags:
3860 self._dynamic_config._reinstall_nodes[pkg] = \
3864 if not matched_packages:
3867 if "--debug" in self._frozen_config.myopts:
3868 for pkg in matched_packages:
3869 portage.writemsg("%s %s\n" % \
3870 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3872 # Filter out any old-style virtual matches if they are
3873 # mixed with new-style virtual matches.
3875 if len(matched_packages) > 1 and \
3876 "virtual" == portage.catsplit(cp)[0]:
3877 for pkg in matched_packages:
3880 # Got a new-style virtual, so filter
3881 # out any old-style virtuals.
3882 matched_packages = [pkg for pkg in matched_packages \
3886 if existing_node is not None and \
3887 existing_node in matched_packages:
3888 return existing_node, existing_node
3890 if len(matched_packages) > 1:
3891 if rebuilt_binaries:
3894 for pkg in matched_packages:
3899 if built_pkg is not None and inst_pkg is not None:
3900 # Only reinstall if binary package BUILD_TIME is
3901 # non-empty, in order to avoid cases like to
3902 # bug #306659 where BUILD_TIME fields are missing
3903 # in local and/or remote Packages file.
3905 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3906 except (KeyError, ValueError):
3910 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3911 except (KeyError, ValueError):
3912 installed_timestamp = 0
3914 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3915 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3916 if built_timestamp and \
3917 built_timestamp > installed_timestamp and \
3918 built_timestamp >= minimal_timestamp:
3919 return built_pkg, existing_node
3921 #Don't care if the binary has an older BUILD_TIME than the installed
3922 #package. This is for closely tracking a binhost.
3923 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3925 if built_timestamp and \
3926 built_timestamp != installed_timestamp:
3927 return built_pkg, existing_node
3929 for pkg in matched_packages:
3930 if pkg.installed and pkg.invalid:
3931 matched_packages = [x for x in \
3932 matched_packages if x is not pkg]
3935 for pkg in matched_packages:
3936 if pkg.installed and self._pkg_visibility_check(pkg, \
3937 allow_unstable_keywords=allow_unstable_keywords,
3938 allow_license_changes=allow_license_changes,
3939 allow_unmasks=allow_unmasks):
3940 return pkg, existing_node
3942 visible_matches = []
3944 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
3945 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3946 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
3947 if not visible_matches:
3948 visible_matches = [pkg.cpv for pkg in matched_packages \
3949 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3950 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
3952 bestmatch = portage.best(visible_matches)
3954 # all are masked, so ignore visibility
3955 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
3956 matched_packages = [pkg for pkg in matched_packages \
3957 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3959 # ordered by type preference ("ebuild" type is the last resort)
3960 return matched_packages[-1], existing_node
# NOTE(review): this chunk is an elided, line-numbered listing (gaps in the
# 39xx numbering are lines missing from this view), so code is preserved
# verbatim and only comments are added.
# Package-selection strategy used while completing the graph: only consider
# candidates already pulled into the dependency graph, never fresh candidates
# from the repositories.
3962 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3964 Select packages that have already been added to the graph or
3965 those that are installed and have not been scheduled for
# Look the atom up in the "graph trees" dbapi, which contains only the
# packages that are part of the current depgraph.
3968 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3969 matches = graph_db.match_pkgs(atom)
# matches is ordered ascending, so the last element is the highest version.
3972 pkg = matches[-1] # highest match
# Also report any same-slot package already present in the graph, so the
# caller can detect slot replacement.
3973 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3974 return pkg, in_graph
# NOTE(review): elided listing -- gaps in the 39xx numbering are lines
# missing from this view; code lines are preserved verbatim.
# Package-selection strategy used (per _complete_graph) when "remove" is in
# myparams: only consider packages from the installed-packages (vartree) db.
3976 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
3978 Select packages that are installed.
3980 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3981 matches = vardb.match_pkgs(atom)
# With multiple installed matches, prefer those that pass the normal
# visibility check; presumably the single survivor wins -- the narrowing
# logic between 3986 and 3995 is partly elided here.
3984 if len(matches) > 1:
3985 unmasked = [pkg for pkg in matches if \
3986 self._pkg_visibility_check(pkg)]
3988 if len(unmasked) == 1:
# Fallback filter on pkg.masks directly, since visibility checks usually
# ignore certain masks (e.g. KEYWORDS masks) for installed packages.
3991 # Account for packages with masks (like KEYWORDS masks)
3992 # that are usually ignored in visibility checks for
3993 # installed packages, in order to handle cases like
3995 unmasked = [pkg for pkg in matches if not pkg.masks]
# matches is ordered ascending: take the highest version, and report any
# same-slot package already in the graph.
3998 pkg = matches[-1] # highest match
3999 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4000 return pkg, in_graph
# NOTE(review): elided listing -- gaps in the 4xxx numbering are lines
# missing from this view (early returns, else-branches, blank lines); code
# lines are preserved verbatim and only comments are added.
4002 def _complete_graph(self, required_sets=None):
4004 Add any deep dependencies of required sets (args, system, world) that
4005 have not been pulled into the graph yet. This ensures that the graph
4006 is consistent such that initially satisfied deep dependencies are not
4007 broken in the new graph. Initially unsatisfied dependencies are
4008 irrelevant since we only want to avoid breaking dependencies that are
4009 initially satisfied.
4011 Since this method can consume enough time to disturb users, it is
4012 currently only enabled by the --complete-graph option.
4014 @param required_sets: contains required sets (currently only used
4015 for depclean and prune removal operations)
4016 @type required_sets: dict
# Graph completion is pointless for --buildpkgonly and when dependency
# recursion is disabled.
4018 if "--buildpkgonly" in self._frozen_config.myopts or \
4019 "recurse" not in self._dynamic_config.myparams:
4022 if "complete" not in self._dynamic_config.myparams:
4023 # Automatically enable complete mode if there are any
4024 # downgrades, since they often break dependencies
4025 # (like in bug #353613).
4026 have_downgrade = False
4027 for node in self._dynamic_config.digraph:
# Only "merge" Package nodes can be downgrades; other nodes are skipped
# (the continue statement falls in the elided 4030 range).
4028 if not isinstance(node, Package) or \
4029 node.operation != "merge":
4031 vardb = self._frozen_config.roots[
4032 node.root].trees["vartree"].dbapi
4033 inst_pkg = vardb.match_pkgs(node.slot_atom)
# A higher installed version in the same slot means this merge is a
# downgrade.
4034 if inst_pkg and inst_pkg[0] > node:
4035 have_downgrade = True
4039 self._dynamic_config.myparams["complete"] = True
4041 # Skip complete graph mode, in order to avoid consuming
4042 # enough time to disturb users.
4047 # Put the depgraph into a mode that causes it to only
4048 # select packages that have already been added to the
4049 # graph or those that are installed and have not been
4050 # scheduled for replacement. Also, toggle the "deep"
4051 # parameter so that all dependencies are traversed and
4053 self._select_atoms = self._select_atoms_from_graph
# Removal operations resolve against installed packages; all other
# operations resolve against the graph itself (see the two
# _select_pkg_from_* strategies above).
4054 if "remove" in self._dynamic_config.myparams:
4055 self._select_package = self._select_pkg_from_installed
4057 self._select_package = self._select_pkg_from_graph
4058 self._dynamic_config._traverse_ignored_deps = True
4059 already_deep = self._dynamic_config.myparams.get("deep") is True
4060 if not already_deep:
4061 self._dynamic_config.myparams["deep"] = True
4063 # Invalidate the package selection cache, since
4064 # _select_package has just changed implementations.
4065 for trees in self._dynamic_config._filtered_trees.values():
4066 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets (system/world/...) as arguments so that
# their deep dependencies get traversed.
4068 args = self._dynamic_config._initial_arg_list[:]
4069 for root in self._frozen_config.roots:
4070 if root != self._frozen_config.target_root and \
4071 "remove" in self._dynamic_config.myparams:
4072 # Only pull in deps for the relevant root.
4074 depgraph_sets = self._dynamic_config.sets[root]
4075 required_set_names = self._frozen_config._required_set_names.copy()
4076 remaining_args = required_set_names.copy()
4077 if required_sets is None or root not in required_sets:
4080 # Removal actions may override sets with temporary
4081 # replacements that have had atoms removed in order
4082 # to implement --deselect behavior.
4083 required_set_names = set(required_sets[root])
4084 depgraph_sets.sets.clear()
4085 depgraph_sets.sets.update(required_sets[root])
4086 if "remove" not in self._dynamic_config.myparams and \
4087 root == self._frozen_config.target_root and \
4089 remaining_args.difference_update(depgraph_sets.sets)
# Nothing left to do for this root if all sets are accounted for and no
# deps are pending.
4090 if not remaining_args and \
4091 not self._dynamic_config._ignored_deps and \
4092 not self._dynamic_config._dep_stack:
4094 root_config = self._frozen_config.roots[root]
4095 for s in required_set_names:
4096 pset = depgraph_sets.sets.get(s)
4098 pset = root_config.sets[s]
4099 atom = SETPREFIX + s
4100 args.append(SetArg(arg=atom, pset=pset,
4101 root_config=root_config))
4103 self._set_args(args)
# Push every atom of every (expanded) set arg onto the dep stack so the
# next _create_graph pass traverses them.
4104 for arg in self._expand_set_args(args, add_to_digraph=True):
4105 for atom in arg.pset.getAtoms():
4106 self._dynamic_config._dep_stack.append(
4107 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps are re-queued now that ignored-dep traversal
# is enabled.
4111 if self._dynamic_config._ignored_deps:
4112 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4113 self._dynamic_config._ignored_deps = []
4114 if not self._create_graph(allow_unsatisfied=True):
4116 # Check the unsatisfied deps to see if any initially satisfied deps
4117 # will become unsatisfied due to an upgrade. Initially unsatisfied
4118 # deps are irrelevant since we only want to avoid breaking deps
4119 # that are initially satisfied.
4120 while self._dynamic_config._unsatisfied_deps:
4121 dep = self._dynamic_config._unsatisfied_deps.pop()
4122 vardb = self._frozen_config.roots[
4123 dep.root].trees["vartree"].dbapi
4124 matches = vardb.match_pkgs(dep.atom)
# No installed match: the dep was unsatisfied to begin with, so record
# and ignore it.
4126 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4128 # An scheduled installation broke a deep dependency.
4129 # Add the installed package to the graph so that it
4130 # will be appropriately reported as a slot collision
4131 # (possibly solvable via backtracking).
4132 pkg = matches[-1] # highest match
4133 if not self._add_pkg(pkg, dep):
4135 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): elided listing -- gaps in the 41xx numbering are lines
# missing from this view (docstring delimiters, cache-miss/else branches,
# try/except framing, final return); code lines are preserved verbatim.
4139 def _pkg(self, cpv, type_name, root_config, installed=False,
4140 onlydeps=False, myrepo = None):
4142 Get a package instance from the cache, or create a new
4143 one if necessary. Raises PackageNotFound from aux_get if it
4144 failures for some reason (package does not exist or is
4148 # Ensure that we use the specially optimized RootConfig instance
4149 # that refers to FakeVartree instead of the real vartree.
4150 root_config = self._frozen_config.roots[root_config.root]
# First try the frozen-config package cache, keyed by the Package hash
# key (cpv, type, repo, root, installed, onlydeps).
4151 pkg = self._frozen_config._pkg_cache.get(
4152 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4153 repo_name=myrepo, root_config=root_config,
4154 installed=installed, onlydeps=onlydeps))
4155 if pkg is None and onlydeps and not installed:
4156 # Maybe it already got pulled in as a "merge" node.
# Same lookup but with onlydeps=False, against the dynamic graph dbapi.
4157 pkg = self._dynamic_config.mydbapi[root_config.root].get(
4158 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4159 repo_name=myrepo, root_config=root_config,
4160 installed=installed, onlydeps=False))
# Cache miss: build a fresh Package from db metadata. db_keys comes from
# the original (non-Fake) tree so the aux cache key set matches on-disk
# caches.
4163 tree_type = self.pkg_tree_map[type_name]
4164 db = root_config.trees[tree_type].dbapi
4165 db_keys = list(self._frozen_config._trees_orig[root_config.root][
4166 tree_type].dbapi._aux_cache_keys)
# aux_get failure (elided except-clause) is translated to PackageNotFound.
4169 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
4171 raise portage.exception.PackageNotFound(cpv)
4173 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
4174 installed=installed, metadata=metadata, onlydeps=onlydeps,
4175 root_config=root_config, type_name=type_name)
4177 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest version per slot that is masked by LICENSE alone, so
# that a useful ACCEPT_LICENSE hint can be shown later.
4179 if not self._pkg_visibility_check(pkg) and \
4180 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
4181 slot_key = (pkg.root, pkg.slot_atom)
4182 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
4183 if other_pkg is None or pkg > other_pkg:
4184 self._frozen_config._highest_license_masked[slot_key] = pkg
# NOTE(review): elided listing -- gaps in the 4xxx numbering are lines
# missing from this view (loop headers, else/continue branches, try/except
# framing, returns); code lines are preserved verbatim and only comments
# are added. The logic below is order-sensitive; do not restructure without
# the full original source.
4188 def _validate_blockers(self):
4189 """Remove any blockers from the digraph that do not match any of the
4190 packages within the graph. If necessary, create hard deps to ensure
4191 correct merge order such that mutually blocking packages are never
4192 installed simultaneously. Also add runtime blockers from all installed
4193 packages if any of them haven't been added already (bug 128809)."""
# Blockers are irrelevant when nothing will actually be merged into ROOT.
4195 if "--buildpkgonly" in self._frozen_config.myopts or \
4196 "--nodeps" in self._frozen_config.myopts:
4199 complete = "complete" in self._dynamic_config.myparams
4200 deep = "deep" in self._dynamic_config.myparams
4203 # Pull in blockers from all installed packages that haven't already
4204 # been pulled into the depgraph, in order to ensure that the are
4205 # respected (bug 128809). Due to the performance penalty that is
4206 # incurred by all the additional dep_check calls that are required,
4207 # blockers returned from dep_check are cached on disk by the
4208 # BlockerCache class.
4210 # For installed packages, always ignore blockers from DEPEND since
4211 # only runtime dependencies should be relevant for packages that
4212 # are already built.
4213 dep_keys = ["RDEPEND", "PDEPEND"]
4214 for myroot in self._frozen_config.trees:
4215 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4216 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4217 pkgsettings = self._frozen_config.pkgsettings[myroot]
4218 root_config = self._frozen_config.roots[myroot]
4219 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
4220 final_db = self._dynamic_config.mydbapi[myroot]
# stale_cache starts as every cached cpv; entries still present in vardb
# are discarded below, and whatever remains is purged at 4348-4349.
4222 blocker_cache = BlockerCache(myroot, vardb)
4223 stale_cache = set(blocker_cache)
4226 stale_cache.discard(cpv)
4227 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4229 pkg in self._dynamic_config._traversed_pkg_deps
4231 # Check for masked installed packages. Only warn about
4232 # packages that are in the graph in order to avoid warning
4233 # about those that will be automatically uninstalled during
4234 # the merge process or by --depclean. Always warn about
4235 # packages masked by license, since the user likely wants
4236 # to adjust ACCEPT_LICENSE.
4238 if not self._pkg_visibility_check(pkg) and \
4239 (pkg_in_graph or 'LICENSE' in pkg.masks):
4240 self._dynamic_config._masked_installed.add(pkg)
4242 self._check_masks(pkg)
4244 blocker_atoms = None
# If the package's deps were already traversed, reuse the blockers
# recorded in the graph instead of re-running dep_check.
4250 self._dynamic_config._blocker_parents.child_nodes(pkg))
4255 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4259 # Select just the runtime blockers.
4260 blockers = [blocker for blocker in blockers \
4261 if blocker.priority.runtime or \
4262 blocker.priority.runtime_post]
4263 if blockers is not None:
4264 blockers = set(blocker.atom for blocker in blockers)
4266 # If this node has any blockers, create a "nomerge"
4267 # node for it so that they can be enforced.
4268 self._spinner_update()
4269 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cache entry belongs to a different build
# of this cpv, so it must be discarded.
4270 if blocker_data is not None and \
4271 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4274 # If blocker data from the graph is available, use
4275 # it to validate the cache and update the cache if
4277 if blocker_data is not None and \
4278 blockers is not None:
4279 if not blockers.symmetric_difference(
4280 blocker_data.atoms):
4284 if blocker_data is None and \
4285 blockers is not None:
4286 # Re-use the blockers from the graph.
4287 blocker_atoms = sorted(blockers)
4288 counter = long(pkg.metadata["COUNTER"])
4290 blocker_cache.BlockerData(counter, blocker_atoms)
4291 blocker_cache[pkg.cpv] = blocker_data
4295 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
4297 # Use aux_get() to trigger FakeVartree global
4298 # updates on *DEPEND when appropriate.
4299 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4300 # It is crucial to pass in final_db here in order to
4301 # optimize dep_check calls by eliminating atoms via
4302 # dep_wordreduce and dep_eval calls.
4304 success, atoms = portage.dep_check(depstr,
4305 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4306 trees=self._dynamic_config._graph_trees, myroot=myroot)
4309 except Exception as e:
4310 # This is helpful, for example, if a ValueError
4311 # is thrown from cpv_expand due to multiple
4312 # matches (this can happen if an atom lacks a
4314 show_invalid_depstring_notice(
4315 pkg, depstr, str(e))
# dep_check reported failure: tolerate it only if the package is being
# replaced anyway.
4319 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4320 if replacement_pkg and \
4321 replacement_pkg[0].operation == "merge":
4322 # This package is being replaced anyway, so
4323 # ignore invalid dependencies so as not to
4324 # annoy the user too much (otherwise they'd be
4325 # forced to manually unmerge it first).
4327 show_invalid_depstring_notice(pkg, depstr, atoms)
# Keep only blocker atoms from the dep_check result and refresh the
# on-disk cache entry for this cpv.
4329 blocker_atoms = [myatom for myatom in atoms \
4331 blocker_atoms.sort()
4332 counter = long(pkg.metadata["COUNTER"])
4333 blocker_cache[cpv] = \
4334 blocker_cache.BlockerData(counter, blocker_atoms)
# Register one runtime-priority Blocker node per atom, parented to pkg.
4337 for atom in blocker_atoms:
4338 blocker = Blocker(atom=atom,
4339 eapi=pkg.metadata["EAPI"],
4340 priority=self._priority(runtime=True),
4342 self._dynamic_config._blocker_parents.add(blocker, pkg)
4343 except portage.exception.InvalidAtom as e:
4344 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4345 show_invalid_depstring_notice(
4346 pkg, depstr, "Invalid Atom: %s" % (e,))
# Purge cache entries for packages no longer installed, then persist.
4348 for cpv in stale_cache:
4349 del blocker_cache[cpv]
4350 blocker_cache.flush()
4353 # Discard any "uninstall" tasks scheduled by previous calls
4354 # to this method, since those tasks may not make sense given
4355 # the current graph state.
4356 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
4357 if previous_uninstall_tasks:
4358 self._dynamic_config._blocker_uninstalls = digraph()
4359 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against the initial
# (installed) and final (post-merge) package sets.
4361 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
4362 self._spinner_update()
4363 root_config = self._frozen_config.roots[blocker.root]
4364 virtuals = root_config.settings.getvirtuals()
4365 myroot = blocker.root
4366 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
4367 final_db = self._dynamic_config.mydbapi[myroot]
# Old-style virtuals must be expanded to their providers unless a
# new-style virtual for this cp exists in the graph.
4369 provider_virtual = False
4370 if blocker.cp in virtuals and \
4371 not self._have_new_virt(blocker.root, blocker.cp):
4372 provider_virtual = True
4374 # Use this to check PROVIDE for each matched package
4376 atom_set = InternalPackageSet(
4377 initial_atoms=[blocker.atom])
4379 if provider_virtual:
4381 for provider_entry in virtuals[blocker.cp]:
4382 atoms.append(Atom(blocker.atom.replace(
4383 blocker.cp, provider_entry.cp, 1)))
4385 atoms = [blocker.atom]
# Packages blocked in the currently-installed set.
4387 blocked_initial = set()
4389 for pkg in initial_db.match_pkgs(atom):
4390 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4391 blocked_initial.add(pkg)
# Packages blocked in the post-merge set.
4393 blocked_final = set()
4395 for pkg in final_db.match_pkgs(atom):
4396 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4397 blocked_final.add(pkg)
# A blocker matching nothing is irrelevant: drop it, and drop parents
# left without any remaining blockers.
4399 if not blocked_initial and not blocked_final:
4400 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
4401 self._dynamic_config._blocker_parents.remove(blocker)
4402 # Discard any parents that don't have any more blockers.
4403 for pkg in parent_pkgs:
4404 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
4405 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
4406 self._dynamic_config._blocker_parents.remove(pkg)
4408 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4409 unresolved_blocks = False
4410 depends_on_order = set()
4411 for pkg in blocked_initial:
4412 if pkg.slot_atom == parent.slot_atom and \
4413 not blocker.atom.blocker.overlap.forbid:
4414 # New !!atom blockers do not allow temporary
4415 # simulaneous installation, so unlike !atom
4416 # blockers, !!atom blockers aren't ignored
4417 # when they match other packages occupying
4420 if parent.installed:
4421 # Two currently installed packages conflict with
4422 # eachother. Ignore this case since the damage
4423 # is already done and this would be likely to
4424 # confuse users if displayed like a normal blocker.
4427 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4429 if parent.operation == "merge":
4430 # Maybe the blocked package can be replaced or simply
4431 # unmerged to resolve this block.
4432 depends_on_order.add((pkg, parent))
4434 # None of the above blocker resolutions techniques apply,
4435 # so apparently this one is unresolvable.
4436 unresolved_blocks = True
4437 for pkg in blocked_final:
4438 if pkg.slot_atom == parent.slot_atom and \
4439 not blocker.atom.blocker.overlap.forbid:
4440 # New !!atom blockers do not allow temporary
4441 # simulaneous installation, so unlike !atom
4442 # blockers, !!atom blockers aren't ignored
4443 # when they match other packages occupying
4446 if parent.operation == "nomerge" and \
4447 pkg.operation == "nomerge":
4448 # This blocker will be handled the next time that a
4449 # merge of either package is triggered.
4452 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4454 # Maybe the blocking package can be
4455 # unmerged to resolve this block.
4456 if parent.operation == "merge" and pkg.installed:
4457 depends_on_order.add((pkg, parent))
4459 elif parent.operation == "nomerge":
4460 depends_on_order.add((parent, pkg))
4462 # None of the above blocker resolutions techniques apply,
4463 # so apparently this one is unresolvable.
4464 unresolved_blocks = True
4466 # Make sure we don't unmerge any package that have been pulled
# A candidate uninstall target that something in the graph still depends
# on makes the block unresolvable.
4468 if not unresolved_blocks and depends_on_order:
4469 for inst_pkg, inst_task in depends_on_order:
4470 if self._dynamic_config.digraph.contains(inst_pkg) and \
4471 self._dynamic_config.digraph.parent_nodes(inst_pkg):
4472 unresolved_blocks = True
4475 if not unresolved_blocks and depends_on_order:
4476 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit "uninstall" task and order
# it before the merge via a hard (BlockerDepPriority) edge.
4477 uninst_task = Package(built=inst_pkg.built,
4478 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4479 metadata=inst_pkg.metadata,
4480 operation="uninstall",
4481 root_config=inst_pkg.root_config,
4482 type_name=inst_pkg.type_name)
4483 # Enforce correct merge order with a hard dep.
4484 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
4485 priority=BlockerDepPriority.instance)
4486 # Count references to this blocker so that it can be
4487 # invalidated after nodes referencing it have been
4489 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
4490 if not unresolved_blocks and not depends_on_order:
4491 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
4492 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
4493 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
4494 self._dynamic_config._blocker_parents.remove(blocker)
4495 if not self._dynamic_config._blocker_parents.child_nodes(parent):
4496 self._dynamic_config._blocker_parents.remove(parent)
4497 if unresolved_blocks:
4498 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
# Returns whether unresolved blocker conflicts may be tolerated for this run:
# true when any of the listed options is set -- presumably because none of
# these modes performs an actual merge (TODO confirm against full source).
# NOTE(review): elided listing -- the accumulator initialization and the
# return statement fall in the missing 4503/4507-4510 range.
4502 def _accept_blocker_conflicts(self):
4504 for x in ("--buildpkgonly", "--fetchonly",
4505 "--fetch-all-uri", "--nodeps"):
4506 if x in self._frozen_config.myopts:
# NOTE(review): elided listing -- the comparator's return statements for the
# uninstall/system-dep branches fall in missing 45xx lines; code preserved
# verbatim.
4511 def _merge_order_bias(self, mygraph):
4513 For optimal leaf node selection, promote deep system runtime deps and
4514 order nodes from highest to lowest overall reference count.
# node_info maps each node to its parent (reference) count; initialization
# of the dict itself is in the elided range.
4518 for node in mygraph.order:
4519 node_info[node] = len(mygraph.parent_nodes(node))
4520 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Three-way comparator (old cmp-style), adapted for key-based sorting via
# cmp_sort_key below.
4522 def cmp_merge_preference(node1, node2):
# Uninstall operations are ordered relative to each other / non-uninstalls
# first (exact return values elided).
4524 if node1.operation == 'uninstall':
4525 if node2.operation == 'uninstall':
4529 if node2.operation == 'uninstall':
4530 if node1.operation == 'uninstall':
# Then deep system runtime deps are promoted ahead of other nodes.
4534 node1_sys = node1 in deep_system_deps
4535 node2_sys = node2 in deep_system_deps
4536 if node1_sys != node2_sys:
# Finally, higher reference count sorts earlier (descending order).
4541 return node_info[node2] - node_info[node1]
4543 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Returns the serialized merge list, computing and caching it on first use.
# NOTE(review): the parameter name `reversed` shadows the builtin; it cannot
# be renamed here without breaking keyword callers. The reversal logic and
# final return fall in elided 45xx lines.
4545 def altlist(self, reversed=False):
# Loop until serialization succeeds; _serialize_tasks_retry restarts the
# conflict-resolution/serialization cycle.
4547 while self._dynamic_config._serialized_tasks_cache is None:
4548 self._resolve_conflicts()
4550 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
4551 self._serialize_tasks()
4552 except self._serialize_tasks_retry:
# Hand out a copy so callers cannot mutate the cached list.
4555 retlist = self._dynamic_config._serialized_tasks_cache[:]
# NOTE(review): elided listing -- initialization of libc_pkgs, several
# continue/early-return lines, and loop headers fall in missing 45xx lines;
# code preserved verbatim.
4560 def _implicit_libc_deps(self, mergelist, graph):
4562 Create implicit dependencies on libc, in order to ensure that libc
4563 is installed as early as possible (see bug #303567).
# Only the running root is considered for implicit libc deps.
4566 implicit_libc_roots = (self._frozen_config._running_root.root,)
4567 for root in implicit_libc_roots:
4568 graphdb = self._dynamic_config.mydbapi[root]
4569 vardb = self._frozen_config.trees[root]["vartree"].dbapi
# Expand virtual/libc (LIBC_PACKAGE_ATOM) to the concrete providers in
# the graph, and collect providers that are new merges (not installed).
4570 for atom in self._expand_virt_from_graph(root,
4571 portage.const.LIBC_PACKAGE_ATOM):
4574 match = graphdb.match_pkgs(atom)
4578 if pkg.operation == "merge" and \
4579 not vardb.cpv_exists(pkg.cpv):
4580 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; for each merge after the libc package(s),
# add a buildtime edge so libc is ordered first in the graph.
4585 earlier_libc_pkgs = set()
4587 for pkg in mergelist:
4588 if not isinstance(pkg, Package):
4589 # a satisfied blocker
4591 root_libc_pkgs = libc_pkgs.get(pkg.root)
4592 if root_libc_pkgs is not None and \
4593 pkg.operation == "merge":
4594 if pkg in root_libc_pkgs:
4595 earlier_libc_pkgs.add(pkg)
4597 for libc_pkg in root_libc_pkgs:
4598 if libc_pkg in earlier_libc_pkgs:
4599 graph.add(libc_pkg, pkg,
4600 priority=DepPriority(buildtime=True))
# NOTE(review): elided listing -- docstring delimiters, a loop header around
# 4640, and the final return fall in missing 46xx lines; code preserved
# verbatim.
4602 def schedulerGraph(self):
4604 The scheduler graph is identical to the normal one except that
4605 uninstall edges are reversed in specific cases that require
4606 conflicting packages to be temporarily installed simultaneously.
4607 This is intended for use by the Scheduler in it's parallelization
4608 logic. It ensures that temporary simultaneous installation of
4609 conflicting packages is avoided when appropriate (especially for
4610 !!atom blockers), but allowed in specific cases that require it.
4612 Note that this method calls break_refs() which alters the state of
4613 internal Package instances such that this depgraph instance should
4614 not be used to perform any more calculations.
4617 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
4618 mergelist = self.altlist()
4619 self._implicit_libc_deps(mergelist,
4620 self._dynamic_config._scheduler_graph)
4622 # Break DepPriority.satisfied attributes which reference
4623 # installed Package instances.
4624 for parents, children, node in \
4625 self._dynamic_config._scheduler_graph.nodes.values():
4626 for priorities in chain(parents.values(), children.values()):
4627 for priority in priorities:
# Deliberately replaces a truthy Package reference with plain True so the
# priority no longer keeps that Package instance alive (see 4622-4623).
4628 if priority.satisfied:
4629 priority.satisfied = True
# Prune the package cache down to packages that are either in the
# scheduler graph or still installed, so everything else can be GC'd.
4631 pkg_cache = self._frozen_config._pkg_cache
4632 graph = self._dynamic_config._scheduler_graph
4633 trees = self._frozen_config.trees
4634 pruned_pkg_cache = {}
4635 for key, pkg in pkg_cache.items():
4636 if pkg in graph or \
4637 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
4638 pruned_pkg_cache[key] = pkg
4641 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
# Presumably break_refs() is invoked in the elided 4642-4644 range (the
# docstring above says this method calls it) -- TODO confirm.
4645 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
# NOTE(review): elided listing -- only the docstring delimiters (4650/4655)
# and a blank line are missing from this view; code preserved verbatim.
4649 def break_refs(self):
4651 Break any references in Package instances that lead back to the depgraph.
4652 This is useful if you want to hold references to packages without also
4653 holding the depgraph on the heap. It should only be called after the
4654 depgraph and _frozen_config will not be used for any more calculations.
# Swap each RootConfig's state for the original (pre-depgraph) one, then
# store our instance back so only one object remains referenced.
4656 for root_config in self._frozen_config.roots.values():
4657 root_config.update(self._frozen_config._trees_orig[
4658 root_config.root]["root_config"])
4659 # Both instances are now identical, so discard the
4660 # original which should have no other references.
4661 self._frozen_config._trees_orig[
4662 root_config.root]["root_config"] = root_config
# Driver for conflict resolution before task serialization: complete the
# graph, validate blockers, then process any slot conflicts. Raises the
# depgraph's internal-error exception if either phase fails.
# (NOTE(review): only blank lines are elided from this method's listing.)
4664 def _resolve_conflicts(self):
4665 if not self._complete_graph():
4666 raise self._unknown_internal_error()
4668 if not self._validate_blockers():
# A blocker-validation failure is not retryable, so disable restart.
4669 self._dynamic_config._skip_restart = True
4670 raise self._unknown_internal_error()
4672 if self._dynamic_config._slot_collision_info:
4673 self._process_slot_conflicts()
4675 def _serialize_tasks(self):
4677 if "--debug" in self._frozen_config.myopts:
4678 writemsg("\ndigraph:\n\n", noiselevel=-1)
4679 self._dynamic_config.digraph.debug_print()
4680 writemsg("\n", noiselevel=-1)
4682 scheduler_graph = self._dynamic_config.digraph.copy()
4684 if '--nodeps' in self._frozen_config.myopts:
4685 # Preserve the package order given on the command line.
4686 return ([node for node in scheduler_graph \
4687 if isinstance(node, Package) \
4688 and node.operation == 'merge'], scheduler_graph)
4690 mygraph=self._dynamic_config.digraph.copy()
4692 removed_nodes = set()
4694 # Prune off all DependencyArg instances since they aren't
4695 # needed, and because of nested sets this is faster than doing
4696 # it with multiple digraph.root_nodes() calls below. This also
4697 # takes care of nested sets that have circular references,
4698 # which wouldn't be matched by digraph.root_nodes().
4699 for node in mygraph:
4700 if isinstance(node, DependencyArg):
4701 removed_nodes.add(node)
4703 mygraph.difference_update(removed_nodes)
4704 removed_nodes.clear()
4706 # Prune "nomerge" root nodes if nothing depends on them, since
4707 # otherwise they slow down merge order calculation. Don't remove
4708 # non-root nodes since they help optimize merge order in some cases
4709 # such as revdep-rebuild.
4712 for node in mygraph.root_nodes():
4713 if not isinstance(node, Package) or \
4714 node.installed or node.onlydeps:
4715 removed_nodes.add(node)
4717 self._spinner_update()
4718 mygraph.difference_update(removed_nodes)
4719 if not removed_nodes:
4721 removed_nodes.clear()
4722 self._merge_order_bias(mygraph)
4723 def cmp_circular_bias(n1, n2):
4725 RDEPEND is stronger than PDEPEND and this function
4726 measures such a strength bias within a circular
4727 dependency relationship.
4729 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4730 ignore_priority=priority_range.ignore_medium_soft)
4731 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4732 ignore_priority=priority_range.ignore_medium_soft)
4733 if n1_n2_medium == n2_n1_medium:
4738 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
4740 # Contains uninstall tasks that have been scheduled to
4741 # occur after overlapping blockers have been installed.
4742 scheduled_uninstalls = set()
4743 # Contains any Uninstall tasks that have been ignored
4744 # in order to avoid the circular deps code path. These
4745 # correspond to blocker conflicts that could not be
4747 ignored_uninstall_tasks = set()
4748 have_uninstall_task = False
4749 complete = "complete" in self._dynamic_config.myparams
4752 def get_nodes(**kwargs):
4754 Returns leaf nodes excluding Uninstall instances
4755 since those should be executed as late as possible.
4757 return [node for node in mygraph.leaf_nodes(**kwargs) \
4758 if isinstance(node, Package) and \
4759 (node.operation != "uninstall" or \
4760 node in scheduled_uninstalls)]
4762 # sys-apps/portage needs special treatment if ROOT="/"
4763 running_root = self._frozen_config._running_root.root
4764 runtime_deps = InternalPackageSet(
4765 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4766 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
4767 PORTAGE_PACKAGE_ATOM)
4768 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
4769 PORTAGE_PACKAGE_ATOM)
4772 running_portage = running_portage[0]
4774 running_portage = None
4776 if replacement_portage:
4777 replacement_portage = replacement_portage[0]
4779 replacement_portage = None
4781 if replacement_portage == running_portage:
4782 replacement_portage = None
4784 if replacement_portage is not None and \
4785 (running_portage is None or \
4786 running_portage.cpv != replacement_portage.cpv or \
4787 '9999' in replacement_portage.cpv or \
4788 'git' in replacement_portage.inherited or \
4789 'git-2' in replacement_portage.inherited):
4790 # update from running_portage to replacement_portage asap
4791 asap_nodes.append(replacement_portage)
4793 if running_portage is not None:
4795 portage_rdepend = self._select_atoms_highest_available(
4796 running_root, running_portage.metadata["RDEPEND"],
4797 myuse=self._pkg_use_enabled(running_portage),
4798 parent=running_portage, strict=False)
4799 except portage.exception.InvalidDependString as e:
4800 portage.writemsg("!!! Invalid RDEPEND in " + \
4801 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4802 (running_root, running_portage.cpv, e), noiselevel=-1)
4804 portage_rdepend = {running_portage : []}
4805 for atoms in portage_rdepend.values():
4806 runtime_deps.update(atom for atom in atoms \
4807 if not atom.blocker)
4809 # Merge libc asap, in order to account for implicit
4810 # dependencies. See bug #303567.
4811 implicit_libc_roots = (running_root,)
4812 for root in implicit_libc_roots:
4814 vardb = self._frozen_config.trees[root]["vartree"].dbapi
4815 graphdb = self._dynamic_config.mydbapi[root]
4816 for atom in self._expand_virt_from_graph(root,
4817 portage.const.LIBC_PACKAGE_ATOM):
4820 match = graphdb.match_pkgs(atom)
4824 if pkg.operation == "merge" and \
4825 not vardb.cpv_exists(pkg.cpv):
4829 # If there's also an os-headers upgrade, we need to
4830 # pull that in first. See bug #328317.
4831 for atom in self._expand_virt_from_graph(root,
4832 portage.const.OS_HEADERS_PACKAGE_ATOM):
4835 match = graphdb.match_pkgs(atom)
4839 if pkg.operation == "merge" and \
4840 not vardb.cpv_exists(pkg.cpv):
4841 asap_nodes.append(pkg)
4843 asap_nodes.extend(libc_pkgs)
4845 def gather_deps(ignore_priority, mergeable_nodes,
4846 selected_nodes, node):
4848 Recursively gather a group of nodes that RDEPEND on
4849 eachother. This ensures that they are merged as a group
4850 and get their RDEPENDs satisfied as soon as possible.
4852 if node in selected_nodes:
4854 if node not in mergeable_nodes:
4856 if node == replacement_portage and \
4857 mygraph.child_nodes(node,
4858 ignore_priority=priority_range.ignore_medium_soft):
4859 # Make sure that portage always has all of it's
4860 # RDEPENDs installed first.
4862 selected_nodes.add(node)
4863 for child in mygraph.child_nodes(node,
4864 ignore_priority=ignore_priority):
4865 if not gather_deps(ignore_priority,
4866 mergeable_nodes, selected_nodes, child):
4870 def ignore_uninst_or_med(priority):
4871 if priority is BlockerDepPriority.instance:
4873 return priority_range.ignore_medium(priority)
4875 def ignore_uninst_or_med_soft(priority):
4876 if priority is BlockerDepPriority.instance:
4878 return priority_range.ignore_medium_soft(priority)
4880 tree_mode = "--tree" in self._frozen_config.myopts
4881 # Tracks whether or not the current iteration should prefer asap_nodes
4882 # if available. This is set to False when the previous iteration
4883 # failed to select any nodes. It is reset whenever nodes are
4884 # successfully selected.
4887 # Controls whether or not the current iteration should drop edges that
4888 # are "satisfied" by installed packages, in order to solve circular
4889 # dependencies. The deep runtime dependencies of installed packages are
4890 # not checked in this case (bug #199856), so it must be avoided
4891 # whenever possible.
4892 drop_satisfied = False
4894 # State of variables for successive iterations that loosen the
4895 # criteria for node selection.
4897 # iteration prefer_asap drop_satisfied
4902 # If no nodes are selected on the last iteration, it is due to
4903 # unresolved blockers or circular dependencies.
4905 while not mygraph.empty():
4906 self._spinner_update()
4907 selected_nodes = None
4908 ignore_priority = None
4909 if drop_satisfied or (prefer_asap and asap_nodes):
4910 priority_range = DepPrioritySatisfiedRange
4912 priority_range = DepPriorityNormalRange
4913 if prefer_asap and asap_nodes:
4914 # ASAP nodes are merged before their soft deps. Go ahead and
4915 # select root nodes here if necessary, since it's typical for
4916 # the parent to have been removed from the graph already.
4917 asap_nodes = [node for node in asap_nodes \
4918 if mygraph.contains(node)]
4919 for node in asap_nodes:
4920 if not mygraph.child_nodes(node,
4921 ignore_priority=priority_range.ignore_soft):
4922 selected_nodes = [node]
4923 asap_nodes.remove(node)
4925 if not selected_nodes and \
4926 not (prefer_asap and asap_nodes):
4927 for i in range(priority_range.NONE,
4928 priority_range.MEDIUM_SOFT + 1):
4929 ignore_priority = priority_range.ignore_priority[i]
4930 nodes = get_nodes(ignore_priority=ignore_priority)
4932 # If there is a mixture of merges and uninstalls,
4933 # do the uninstalls first.
4935 good_uninstalls = []
4937 if node.operation == "uninstall":
4938 good_uninstalls.append(node)
4941 nodes = good_uninstalls
4945 if ignore_priority is None and not tree_mode:
4946 # Greedily pop all of these nodes since no
4947 # relationship has been ignored. This optimization
4948 # destroys --tree output, so it's disabled in tree
4950 selected_nodes = nodes
4952 # For optimal merge order:
4953 # * Only pop one node.
4954 # * Removing a root node (node without a parent)
4955 # will not produce a leaf node, so avoid it.
4956 # * It's normal for a selected uninstall to be a
4957 # root node, so don't check them for parents.
4959 if node.operation == "uninstall" or \
4960 mygraph.parent_nodes(node):
4961 selected_nodes = [node]
4967 if not selected_nodes:
4968 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4970 mergeable_nodes = set(nodes)
4971 if prefer_asap and asap_nodes:
4973 for i in range(priority_range.SOFT,
4974 priority_range.MEDIUM_SOFT + 1):
4975 ignore_priority = priority_range.ignore_priority[i]
4977 if not mygraph.parent_nodes(node):
4979 selected_nodes = set()
4980 if gather_deps(ignore_priority,
4981 mergeable_nodes, selected_nodes, node):
4984 selected_nodes = None
4988 if prefer_asap and asap_nodes and not selected_nodes:
4989 # We failed to find any asap nodes to merge, so ignore
4990 # them for the next iteration.
4994 if selected_nodes and ignore_priority is not None:
4995 # Try to merge ignored medium_soft deps as soon as possible
4996 # if they're not satisfied by installed packages.
4997 for node in selected_nodes:
4998 children = set(mygraph.child_nodes(node))
4999 soft = children.difference(
5000 mygraph.child_nodes(node,
5001 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
5002 medium_soft = children.difference(
5003 mygraph.child_nodes(node,
5005 DepPrioritySatisfiedRange.ignore_medium_soft))
5006 medium_soft.difference_update(soft)
5007 for child in medium_soft:
5008 if child in selected_nodes:
5010 if child in asap_nodes:
5012 asap_nodes.append(child)
5014 if selected_nodes and len(selected_nodes) > 1:
5015 if not isinstance(selected_nodes, list):
5016 selected_nodes = list(selected_nodes)
5017 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
5019 if not selected_nodes and not myblocker_uninstalls.is_empty():
5020 # An Uninstall task needs to be executed in order to
5021 # avoid conflict if possible.
5024 priority_range = DepPrioritySatisfiedRange
5026 priority_range = DepPriorityNormalRange
5028 mergeable_nodes = get_nodes(
5029 ignore_priority=ignore_uninst_or_med)
5031 min_parent_deps = None
5034 for task in myblocker_uninstalls.leaf_nodes():
5035 # Do some sanity checks so that system or world packages
5036 # don't get uninstalled inappropriately here (only really
5037 # necessary when --complete-graph has not been enabled).
5039 if task in ignored_uninstall_tasks:
5042 if task in scheduled_uninstalls:
5043 # It's been scheduled but it hasn't
5044 # been executed yet due to dependence
5045 # on installation of blocking packages.
5048 root_config = self._frozen_config.roots[task.root]
5049 inst_pkg = self._pkg(task.cpv, "installed", root_config,
5052 if self._dynamic_config.digraph.contains(inst_pkg):
5055 forbid_overlap = False
5056 heuristic_overlap = False
5057 for blocker in myblocker_uninstalls.parent_nodes(task):
5058 if not eapi_has_strong_blocks(blocker.eapi):
5059 heuristic_overlap = True
5060 elif blocker.atom.blocker.overlap.forbid:
5061 forbid_overlap = True
5063 if forbid_overlap and running_root == task.root:
5066 if heuristic_overlap and running_root == task.root:
5067 # Never uninstall sys-apps/portage or it's essential
5068 # dependencies, except through replacement.
5070 runtime_dep_atoms = \
5071 list(runtime_deps.iterAtomsForPackage(task))
5072 except portage.exception.InvalidDependString as e:
5073 portage.writemsg("!!! Invalid PROVIDE in " + \
5074 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5075 (task.root, task.cpv, e), noiselevel=-1)
5079 # Don't uninstall a runtime dep if it appears
5080 # to be the only suitable one installed.
5082 vardb = root_config.trees["vartree"].dbapi
5083 for atom in runtime_dep_atoms:
5084 other_version = None
5085 for pkg in vardb.match_pkgs(atom):
5086 if pkg.cpv == task.cpv and \
5087 pkg.metadata["COUNTER"] == \
5088 task.metadata["COUNTER"]:
5092 if other_version is None:
5098 # For packages in the system set, don't take
5099 # any chances. If the conflict can't be resolved
5100 # by a normal replacement operation then abort.
5103 for atom in root_config.sets[
5104 "system"].iterAtomsForPackage(task):
5107 except portage.exception.InvalidDependString as e:
5108 portage.writemsg("!!! Invalid PROVIDE in " + \
5109 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5110 (task.root, task.cpv, e), noiselevel=-1)
5116 # Note that the world check isn't always
5117 # necessary since self._complete_graph() will
5118 # add all packages from the system and world sets to the
5119 # graph. This just allows unresolved conflicts to be
5120 # detected as early as possible, which makes it possible
5121 # to avoid calling self._complete_graph() when it is
5122 # unnecessary due to blockers triggering an abortion.
5124 # For packages in the world set, go ahead an uninstall
5125 # when necessary, as long as the atom will be satisfied
5126 # in the final state.
5127 graph_db = self._dynamic_config.mydbapi[task.root]
5130 for atom in root_config.sets[
5131 "selected"].iterAtomsForPackage(task):
5133 for pkg in graph_db.match_pkgs(atom):
5140 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
5142 except portage.exception.InvalidDependString as e:
5143 portage.writemsg("!!! Invalid PROVIDE in " + \
5144 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5145 (task.root, task.cpv, e), noiselevel=-1)
5151 # Check the deps of parent nodes to ensure that
5152 # the chosen task produces a leaf node. Maybe
5153 # this can be optimized some more to make the
5154 # best possible choice, but the current algorithm
5155 # is simple and should be near optimal for most
5157 self._spinner_update()
5158 mergeable_parent = False
5160 parent_deps.add(task)
5161 for parent in mygraph.parent_nodes(task):
5162 parent_deps.update(mygraph.child_nodes(parent,
5163 ignore_priority=priority_range.ignore_medium_soft))
5164 if min_parent_deps is not None and \
5165 len(parent_deps) >= min_parent_deps:
5166 # This task is no better than a previously selected
5167 # task, so abort search now in order to avoid wasting
5168 # any more cpu time on this task. This increases
5169 # performance dramatically in cases when there are
5170 # hundreds of blockers to solve, like when
5171 # upgrading to a new slot of kde-meta.
5172 mergeable_parent = None
5174 if parent in mergeable_nodes and \
5175 gather_deps(ignore_uninst_or_med_soft,
5176 mergeable_nodes, set(), parent):
5177 mergeable_parent = True
5179 if not mergeable_parent:
5182 if min_parent_deps is None or \
5183 len(parent_deps) < min_parent_deps:
5184 min_parent_deps = len(parent_deps)
5187 if uninst_task is not None and min_parent_deps == 1:
5188 # This is the best possible result, so so abort search
5189 # now in order to avoid wasting any more cpu time.
5192 if uninst_task is not None:
5193 # The uninstall is performed only after blocking
5194 # packages have been merged on top of it. File
5195 # collisions between blocking packages are detected
5196 # and removed from the list of files to be uninstalled.
5197 scheduled_uninstalls.add(uninst_task)
5198 parent_nodes = mygraph.parent_nodes(uninst_task)
5200 # Reverse the parent -> uninstall edges since we want
5201 # to do the uninstall after blocking packages have
5202 # been merged on top of it.
5203 mygraph.remove(uninst_task)
5204 for blocked_pkg in parent_nodes:
5205 mygraph.add(blocked_pkg, uninst_task,
5206 priority=BlockerDepPriority.instance)
5207 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5208 scheduler_graph.add(blocked_pkg, uninst_task,
5209 priority=BlockerDepPriority.instance)
5211 # Sometimes a merge node will render an uninstall
5212 # node unnecessary (due to occupying the same SLOT),
5213 # and we want to avoid executing a separate uninstall
5214 # task in that case.
5215 slot_node = self._dynamic_config.mydbapi[uninst_task.root
5216 ].match_pkgs(uninst_task.slot_atom)
5218 slot_node[0].operation == "merge":
5219 mygraph.add(slot_node[0], uninst_task,
5220 priority=BlockerDepPriority.instance)
5222 # Reset the state variables for leaf node selection and
5223 # continue trying to select leaf nodes.
5225 drop_satisfied = False
5228 if not selected_nodes:
5229 # Only select root nodes as a last resort. This case should
5230 # only trigger when the graph is nearly empty and the only
5231 # remaining nodes are isolated (no parents or children). Since
5232 # the nodes must be isolated, ignore_priority is not needed.
5233 selected_nodes = get_nodes()
5235 if not selected_nodes and not drop_satisfied:
5236 drop_satisfied = True
5239 if not selected_nodes and not myblocker_uninstalls.is_empty():
5240 # If possible, drop an uninstall task here in order to avoid
5241 # the circular deps code path. The corresponding blocker will
5242 # still be counted as an unresolved conflict.
5244 for node in myblocker_uninstalls.leaf_nodes():
5246 mygraph.remove(node)
5251 ignored_uninstall_tasks.add(node)
5254 if uninst_task is not None:
5255 # Reset the state variables for leaf node selection and
5256 # continue trying to select leaf nodes.
5258 drop_satisfied = False
5261 if not selected_nodes:
5262 self._dynamic_config._circular_deps_for_display = mygraph
5263 self._dynamic_config._skip_restart = True
5264 raise self._unknown_internal_error()
5266 # At this point, we've succeeded in selecting one or more nodes, so
5267 # reset state variables for leaf node selection.
5269 drop_satisfied = False
5271 mygraph.difference_update(selected_nodes)
5273 for node in selected_nodes:
5274 if isinstance(node, Package) and \
5275 node.operation == "nomerge":
5278 # Handle interactions between blockers
5279 # and uninstallation tasks.
5280 solved_blockers = set()
5282 if isinstance(node, Package) and \
5283 "uninstall" == node.operation:
5284 have_uninstall_task = True
5287 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
5288 inst_pkg = vardb.match_pkgs(node.slot_atom)
5290 # The package will be replaced by this one, so remove
5291 # the corresponding Uninstall task if necessary.
5292 inst_pkg = inst_pkg[0]
5293 uninst_task = Package(built=inst_pkg.built,
5294 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5295 metadata=inst_pkg.metadata,
5296 operation="uninstall",
5297 root_config=inst_pkg.root_config,
5298 type_name=inst_pkg.type_name)
5300 mygraph.remove(uninst_task)
5304 if uninst_task is not None and \
5305 uninst_task not in ignored_uninstall_tasks and \
5306 myblocker_uninstalls.contains(uninst_task):
5307 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
5308 myblocker_uninstalls.remove(uninst_task)
5309 # Discard any blockers that this Uninstall solves.
5310 for blocker in blocker_nodes:
5311 if not myblocker_uninstalls.child_nodes(blocker):
5312 myblocker_uninstalls.remove(blocker)
5314 self._dynamic_config._unsolvable_blockers:
5315 solved_blockers.add(blocker)
5317 retlist.append(node)
5319 if (isinstance(node, Package) and \
5320 "uninstall" == node.operation) or \
5321 (uninst_task is not None and \
5322 uninst_task in scheduled_uninstalls):
5323 # Include satisfied blockers in the merge list
5324 # since the user might be interested and also
5325 # it serves as an indicator that blocking packages
5326 # will be temporarily installed simultaneously.
5327 for blocker in solved_blockers:
5328 retlist.append(blocker)
5330 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
5331 for node in myblocker_uninstalls.root_nodes():
5332 unsolvable_blockers.add(node)
5334 # If any Uninstall tasks need to be executed in order
5335 # to avoid a conflict, complete the graph with any
5336 # dependencies that may have been initially
5337 # neglected (to ensure that unsafe Uninstall tasks
5338 # are properly identified and blocked from execution).
5339 if have_uninstall_task and \
5341 not unsolvable_blockers:
5342 self._dynamic_config.myparams["complete"] = True
5343 if '--debug' in self._frozen_config.myopts:
5345 msg.append("enabling 'complete' depgraph mode " + \
5346 "due to uninstall task(s):")
5348 for node in retlist:
5349 if isinstance(node, Package) and \
5350 node.operation == 'uninstall':
5351 msg.append("\t%s" % (node,))
5352 writemsg_level("\n%s\n" % \
5353 "".join("%s\n" % line for line in msg),
5354 level=logging.DEBUG, noiselevel=-1)
5355 raise self._serialize_tasks_retry("")
5357 # Set satisfied state on blockers, but not before the
5358 # above retry path, since we don't want to modify the
5359 # state in that case.
5360 for node in retlist:
5361 if isinstance(node, Blocker):
5362 node.satisfied = True
5364 for blocker in unsolvable_blockers:
5365 retlist.append(blocker)
5367 if unsolvable_blockers and \
5368 not self._accept_blocker_conflicts():
5369 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
5370 self._dynamic_config._serialized_tasks_cache = retlist[:]
5371 self._dynamic_config._scheduler_graph = scheduler_graph
5372 self._dynamic_config._skip_restart = True
5373 raise self._unknown_internal_error()
5375 if self._dynamic_config._slot_collision_info and \
5376 not self._accept_blocker_conflicts():
5377 self._dynamic_config._serialized_tasks_cache = retlist[:]
5378 self._dynamic_config._scheduler_graph = scheduler_graph
5379 raise self._unknown_internal_error()
5381 return retlist, scheduler_graph
5383 def _show_circular_deps(self, mygraph):
# Report an unresolvable dependency cycle to the user: display the merge
# list in verbose tree mode, print the cycle itself, and (when available)
# print suggestions for USE-flag changes that would break the cycle.
# NOTE(review): the fused original line numbers skip in this extract
# (e.g. 5387, 5396, 5408, 5412 are absent), so some statements — including
# at least one if/else pair around the suggestion output — are missing
# here; the surviving text is preserved verbatim.
# Build the handler and keep it on dynamic config so later display code
# can reach it; use a local alias for the rest of this method.
5384 self._dynamic_config._circular_dependency_handler = \
5385 circular_dependency_handler(self, mygraph)
5386 handler = self._dynamic_config._circular_dependency_handler
# Force verbose --tree output so the displayed merge list exposes the cycle.
5388 self._frozen_config.myopts.pop("--quiet", None)
5389 self._frozen_config.myopts["--verbose"] = True
5390 self._frozen_config.myopts["--tree"] = True
5391 portage.writemsg("\n\n", noiselevel=-1)
5392 self.display(handler.merge_list)
5393 prefix = colorize("BAD", " * ")
5394 portage.writemsg("\n", noiselevel=-1)
5395 portage.writemsg(prefix + "Error: circular dependencies:\n",
5397 portage.writemsg("\n", noiselevel=-1)
# Fall back to the raw debug dump when the handler could not compose a
# human-readable message, or always when --debug was requested.
5399 if handler.circular_dep_message is None or \
5400 "--debug" in self._frozen_config.myopts:
5401 handler.debug_print()
5402 portage.writemsg("\n", noiselevel=-1)
5404 if handler.circular_dep_message is not None:
5405 portage.writemsg(handler.circular_dep_message, noiselevel=-1)
# Offer concrete USE-flag changes computed by the handler, wording the
# message for the single-suggestion vs. multiple-suggestion case.
5407 suggestions = handler.suggestions
5409 writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
5410 if len(suggestions) == 1:
5411 writemsg("by applying the following change:\n", noiselevel=-1)
5413 writemsg("by applying " + colorize("bold", "any of") + \
5414 " the following changes:\n", noiselevel=-1)
5415 writemsg("".join(suggestions), noiselevel=-1)
5416 writemsg("\nNote that this change can be reverted, once the package has" + \
5417 " been installed.\n", noiselevel=-1)
5418 if handler.large_cycle_count:
5419 writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
5420 "Several changes might be required to resolve all cycles.\n" + \
5421 "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
# Generic advice path (presumably taken when no suggestions exist —
# the branching line is among the elided ones; confirm against upstream).
5423 writemsg("\n\n", noiselevel=-1)
5424 writemsg(prefix + "Note that circular dependencies " + \
5425 "can often be avoided by temporarily\n", noiselevel=-1)
5426 writemsg(prefix + "disabling USE flags that trigger " + \
5427 "optional dependencies.\n", noiselevel=-1)
5429 def _show_merge_list(self):
5430 if self._dynamic_config._serialized_tasks_cache is not None and \
5431 not (self._dynamic_config._displayed_list and \
5432 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
5433 self._dynamic_config._displayed_list == \
5434 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
5435 display_list = self._dynamic_config._serialized_tasks_cache[:]
5436 if "--tree" in self._frozen_config.myopts:
5437 display_list.reverse()
5438 self.display(display_list)
5440 def _show_unsatisfied_blockers(self, blockers):
# Explain unsatisfiable blocker conflicts: show the merge list, then for
# each blocker list the conflicting packages together with the parents
# (and atoms) that pulled them into the graph.
# NOTE(review): fused original line numbers skip throughout this extract
# (e.g. 5449, 5455-5456, 5471, 5488, 5490-5492 are absent), so several
# statements — initializations of msg/indent/max_parents/conflict_pkgs
# and some break lines — are missing here; text preserved verbatim.
5441 self._show_merge_list()
5442 msg = "Error: The above package list contains " + \
5443 "packages which cannot be installed " + \
5444 "at the same time on the same system."
5445 prefix = colorize("BAD", " * ")
5446 portage.writemsg("\n", noiselevel=-1)
# Wrap the error message to 70 columns with the colored " * " prefix.
5447 for line in textwrap.wrap(msg, 70):
5448 portage.writemsg(prefix + line + "\n", noiselevel=-1)
5450 # Display the conflicting packages along with the packages
5451 # that pulled them in. This is helpful for troubleshooting
5452 # cases in which blockers don't solve automatically and
5453 # the reasons are not apparent from the normal merge list
# Collect, per conflicting package, the set of (parent, atom) pairs
# responsible for it. World-selected packages with no recorded parent
# atoms are attributed to the "@selected" set.
5457 for blocker in blockers:
5458 for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
5459 self._dynamic_config._blocker_parents.parent_nodes(blocker)):
5460 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
5461 if not parent_atoms:
5462 atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
5463 if atom is not None:
5464 parent_atoms = set([("@selected", atom)])
5466 conflict_pkgs[pkg] = parent_atoms
5469 # Reduce noise by pruning packages that are only
5470 # pulled in by other conflict packages.
# A package is kept only if at least one of its parents is outside the
# conflict set ("relevant"); otherwise it is pruned from the report.
5472 for pkg, parent_atoms in conflict_pkgs.items():
5473 relevant_parent = False
5474 for parent, atom in parent_atoms:
5475 if parent not in conflict_pkgs:
5476 relevant_parent = True
5478 if not relevant_parent:
5479 pruned_pkgs.add(pkg)
5480 for pkg in pruned_pkgs:
5481 del conflict_pkgs[pkg]
5487 # Max number of parents shown, to avoid flooding the display.
5489 for pkg, parent_atoms in conflict_pkgs.items():
5493 # Prefer packages that are not directly involved in a conflict.
# First pass: take up to max_parents parents that are NOT themselves
# conflict packages; second pass fills any remaining slots.
5494 for parent_atom in parent_atoms:
5495 if len(pruned_list) >= max_parents:
5497 parent, atom = parent_atom
5498 if parent not in conflict_pkgs:
5499 pruned_list.add(parent_atom)
5501 for parent_atom in parent_atoms:
5502 if len(pruned_list) >= max_parents:
5504 pruned_list.add(parent_atom)
# Report how many parents were omitted by the cap above.
5506 omitted_parents = len(parent_atoms) - len(pruned_list)
5507 msg.append(indent + "%s pulled in by\n" % pkg)
5509 for parent_atom in pruned_list:
5510 parent, atom = parent_atom
5511 msg.append(2*indent)
5512 if isinstance(parent,
5513 (PackageArg, AtomArg)):
5514 # For PackageArg and AtomArg types, it's
5515 # redundant to display the atom attribute.
5516 msg.append(str(parent))
5518 # Display the specific atom from SetArg or
5520 msg.append("%s required by %s" % (atom, parent))
5524 msg.append(2*indent)
5525 msg.append("(and %d more)\n" % omitted_parents)
5529 writemsg("".join(msg), noiselevel=-1)
# Point the user at the blocker documentation unless --quiet is set.
5531 if "--quiet" not in self._frozen_config.myopts:
5532 show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
	"""Display the given merge list via the resolver output routine.

	@param mylist: the merge list (Package/Blocker nodes) to display
	@param favorites: optional list of favorite atoms passed through to
		the output routine; defaults to an empty list
	@param verbosity: optional verbosity override passed through
	@return: whatever the module-level display() output routine returns
	"""
	# Fixed: the original used a mutable default argument
	# (favorites=[]), a classic Python pitfall where the same list
	# object is shared across calls. A None sentinel preserves the
	# observable behavior (callers that omit the argument still get
	# an empty list downstream) without the shared-state hazard.
	if favorites is None:
		favorites = []
	# This is used to prevent display_problems() from
	# redundantly displaying this exact same merge list
	# again via _show_merge_list().
	self._dynamic_config._displayed_list = mylist
	# Delegate to the module-level display() output function
	# (imported elsewhere in this file), not to this method.
	return display(self, mylist, favorites, verbosity)
5544 def _display_autounmask(self):
# Show the --autounmask changes (keyword, mask, USE and license changes)
# grouped per configuration root, and optionally write them to the
# appropriate files under /etc/portage, honoring CONFIG_PROTECT and
# prompting the user when --ask is in effect.
# NOTE(review): fused original line numbers skip throughout this extract
# (e.g. 5545, 5556, 5560, 5583-5584, 5685-5687, 5728-5730 are absent), so
# various statements — try:/else: lines, loop headers over roots, and
# initializations such as roots/problems/license_msg — are missing here;
# the surviving text is preserved verbatim.
5546 Display --autounmask message and optionally write it to config files
5547 (using CONFIG_PROTECT). The message includes the comments and the changes.
# Interpret the relevant emerge options once up front.
5550 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
5551 pretend = "--pretend" in self._frozen_config.myopts
5552 ask = "--ask" in self._frozen_config.myopts
5553 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
# Helper: decide whether pkg is the highest visible version overall and
# the highest within its slot, which controls whether the generated
# config entry uses ">=cpv", ">=cpv:slot" or "=cpv" syntax below.
5555 def check_if_latest(pkg):
5557 is_latest_in_slot = True
5558 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
5559 root_config = self._frozen_config.roots[pkg.root]
5561 for db, pkg_type, built, installed, db_keys in dbs:
5562 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
5563 if other_pkg.cp != pkg.cp:
5564 # old-style PROVIDE virtual means there are no
5565 # normal matches for this pkg_type
5569 if other_pkg.slot_atom == pkg.slot_atom:
5570 is_latest_in_slot = False
5573 # iter_match_pkgs yields highest version first, so
5574 # there's no need to search this pkg_type any further
5577 if not is_latest_in_slot:
5580 return is_latest, is_latest_in_slot
5582 #Set of roots we have autounmask changes for.
# Collect per-root package.keywords entries for packages that need an
# unstable-keyword change, as reported by their masking reasons.
5585 unstable_keyword_msg = {}
5586 for pkg in self._dynamic_config._needed_unstable_keywords:
5587 self._show_merge_list()
5588 if pkg in self._dynamic_config.digraph:
5591 unstable_keyword_msg.setdefault(root, [])
5592 is_latest, is_latest_in_slot = check_if_latest(pkg)
5593 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5594 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5595 use=self._pkg_use_enabled(pkg))
5596 for reason in mreasons:
5597 if reason.unmask_hint and \
5598 reason.unmask_hint.key == 'unstable keyword':
5599 keyword = reason.unmask_hint.value
5601 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
5603 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
5604 elif is_latest_in_slot:
5605 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
5607 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
# Collect per-root package.unmask entries for packages masked via
# package.mask (unmask hint key 'p_mask').
5609 p_mask_change_msg = {}
5610 for pkg in self._dynamic_config._needed_p_mask_changes:
5611 self._show_merge_list()
5612 if pkg in self._dynamic_config.digraph:
5615 p_mask_change_msg.setdefault(root, [])
5616 is_latest, is_latest_in_slot = check_if_latest(pkg)
5617 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5618 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5619 use=self._pkg_use_enabled(pkg))
5620 for reason in mreasons:
5621 if reason.unmask_hint and \
5622 reason.unmask_hint.key == 'p_mask':
5623 keyword = reason.unmask_hint.value
5625 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
5627 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
5628 elif is_latest_in_slot:
5629 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
5631 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
# Collect per-root package.use entries; "-flag" denotes disabling a flag.
5633 use_changes_msg = {}
5634 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
5635 self._show_merge_list()
5636 if pkg in self._dynamic_config.digraph:
5639 use_changes_msg.setdefault(root, [])
5640 is_latest, is_latest_in_slot = check_if_latest(pkg)
5641 changes = needed_use_config_change[1]
5643 for flag, state in changes.items():
5645 adjustments.append(flag)
5647 adjustments.append("-" + flag)
5648 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
5650 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5651 elif is_latest_in_slot:
5652 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
5654 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
# Collect per-root package.license entries for missing licenses,
# sorted for deterministic output.
5657 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
5658 self._show_merge_list()
5659 if pkg in self._dynamic_config.digraph:
5662 license_msg.setdefault(root, [])
5663 is_latest, is_latest_in_slot = check_if_latest(pkg)
5665 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
5667 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
5668 elif is_latest_in_slot:
5669 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
5671 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
# Helper: locate the config file (or last file within a config
# directory) that autounmask changes should be appended to.
5673 def find_config_file(abs_user_config, file_name):
5675 Searches /etc/portage for an appropriate file to append changes to.
5676 If the file_name is a file it is returned, if it is a directory, the
5677 last file in it is returned. Order of traversal is the identical to
5678 portage.util.grablines(recursive=True).
5680 file_name - String containing a file name like "package.use"
5681 return value - String. Absolute path of file to write to. None if
5682 no suitable file exists.
5684 file_path = os.path.join(abs_user_config, file_name)
5688 except OSError as e:
5689 if e.errno == errno.ENOENT:
5690 # The file doesn't exist, so we'll
5694 # Disk or file system trouble?
5697 last_file_path = None
# Directory traversal: regular files win; directories are walked with
# CVS-style dirs, dotfiles and backup (~) files skipped, pushing
# children in reverse-sorted order onto a work stack.
5706 if stat.S_ISREG(st.st_mode):
5708 elif stat.S_ISDIR(st.st_mode):
5709 if os.path.basename(p) in _ignorecvs_dirs:
5712 contents = os.listdir(p)
5716 contents.sort(reverse=True)
5717 for child in contents:
5718 if child.startswith(".") or \
5719 child.endswith("~"):
5721 stack.append(os.path.join(p, child))
5723 return last_file_path
# Resolve a destination file for every change category on every root
# before writing anything; if any destination is missing, record a
# problem and disable writing entirely.
5725 write_to_file = autounmask_write and not pretend
5726 #Make sure we have a file to write to before doing any write.
5727 file_to_write_to = {}
5731 settings = self._frozen_config.roots[root].settings
5732 abs_user_config = os.path.join(
5733 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
5735 if root in unstable_keyword_msg:
5736 file_to_write_to[(abs_user_config, "package.keywords")] = \
5737 find_config_file(abs_user_config, "package.keywords")
5739 if root in p_mask_change_msg:
5740 file_to_write_to[(abs_user_config, "package.unmask")] = \
5741 find_config_file(abs_user_config, "package.unmask")
5743 if root in use_changes_msg:
5744 file_to_write_to[(abs_user_config, "package.use")] = \
5745 find_config_file(abs_user_config, "package.use")
5747 if root in license_msg:
5748 file_to_write_to[(abs_user_config, "package.license")] = \
5749 find_config_file(abs_user_config, "package.license")
5751 for (abs_user_config, f), path in file_to_write_to.items():
5753 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
5755 write_to_file = not problems
# Display the pending changes per root, each category with its own
# colored heading.
5758 settings = self._frozen_config.roots[root].settings
5759 abs_user_config = os.path.join(
5760 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
5763 writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
5765 if root in unstable_keyword_msg:
5766 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
5767 " are necessary to proceed:\n", noiselevel=-1)
5768 writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
5770 if root in p_mask_change_msg:
5771 writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
5772 " are necessary to proceed:\n", noiselevel=-1)
5773 writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
5775 if root in use_changes_msg:
5776 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
5777 " are necessary to proceed:\n", noiselevel=-1)
5778 writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
5780 if root in license_msg:
5781 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
5782 " are necessary to proceed:\n", noiselevel=-1)
5783 writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
# Build a ConfigProtect checker per root so protected destination files
# get a ._cfgXXXX update-file name instead of being modified in place.
5788 settings = self._frozen_config.roots[root].settings
5789 protect_obj[root] = ConfigProtect(settings["EROOT"], \
5790 shlex_split(settings.get("CONFIG_PROTECT", "")),
5791 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
# Helper: append the generated change lines to the destination file,
# reading the existing contents first and writing atomically; failures
# are accumulated in `problems` rather than raised.
5793 def write_changes(root, changes, file_to_write_to):
5794 file_contents = None
5796 file_contents = codecs.open(
5797 _unicode_encode(file_to_write_to,
5798 encoding=_encodings['fs'], errors='strict'),
5799 mode='r', encoding=_encodings['content'],
5800 errors='replace').readlines()
5801 except IOError as e:
5802 if e.errno == errno.ENOENT:
5805 problems.append("!!! Failed to read '%s': %s\n" % \
5806 (file_to_write_to, e))
5807 if file_contents is not None:
5808 file_contents.extend(changes)
5809 if protect_obj[root].isprotected(file_to_write_to):
5810 file_to_write_to = new_protect_filename(file_to_write_to)
5812 write_atomic(file_to_write_to, "".join(file_contents))
5813 except PortageException:
5814 problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
# With --ask, confirm before touching any config file.
5816 if ask and write_to_file and file_to_write_to:
5817 prompt = "\nWould you like to add these " + \
5818 "changes to your config files?"
5819 if userquery(prompt, enter_invalid) == 'No':
5820 write_to_file = False
# Perform the actual writes, one category at a time per root.
5824 settings = self._frozen_config.roots[root].settings
5825 abs_user_config = os.path.join(
5826 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
5828 if root in unstable_keyword_msg:
5829 write_changes(root, unstable_keyword_msg[root],
5830 file_to_write_to.get((abs_user_config, "package.keywords")))
5832 if root in p_mask_change_msg:
5833 write_changes(root, p_mask_change_msg[root],
5834 file_to_write_to.get((abs_user_config, "package.unmask")))
5836 if root in use_changes_msg:
5837 write_changes(root, use_changes_msg[root],
5838 file_to_write_to.get((abs_user_config, "package.use")))
5840 if root in license_msg:
5841 write_changes(root, license_msg[root],
5842 file_to_write_to.get((abs_user_config, "package.license")))
# Final status: report accumulated problems, success, or a hint to use
# --autounmask-write when changes were only displayed.
5845 writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
5847 writemsg_stdout("".join(problems), noiselevel=-1)
5848 elif write_to_file and roots:
5849 writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
5851 elif not pretend and not autounmask_write and roots:
5852 writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
5856 def display_problems(self):
# Public entry point for reporting graph problems (circular deps,
# blockers, slot collisions, masked packages, unsatisfied deps).
# NOTE(review): the docstring delimiters and the try/finally that
# restores sys.stdout are not visible in this extract.
5858 Display problems with the dependency graph such as slot collisions.
5859 This is called internally by display() to show the problems _after_
5860 the merge list where it is most likely to be seen, but if display()
5861 is not going to be called then this method should be called explicitly
5862 to ensure that the user is notified of problems with the graph.
5864 All output goes to stderr, except for unsatisfied dependencies which
5865 go to stdout for parsing by programs such as autounmask.
5868 # Note that show_masked_packages() sends its output to
5869 # stdout, and some programs such as autounmask parse the
5870 # output in cases when emerge bails out. However, when
5871 # show_masked_packages() is called for installed packages
5872 # here, the message is a warning that is more appropriate
5873 # to send to stderr, so temporarily redirect stdout to
5874 # stderr. TODO: Fix output code so there's a cleaner way
5875 # to redirect everything to stderr.
# Swap stdout for stderr while _display_problems() runs; the matching
# restore (presumably in a finally clause) is outside this view.
5880 sys.stdout = sys.stderr
5881 self._display_problems()
5887 # This goes to stdout for parsing by programs like autounmask.
5888 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
5889 self._show_unsatisfied_dep(*pargs, **kwargs)
5891 def _display_problems(self):
# Emit the individual problem reports collected in _dynamic_config
# during graph creation.  All output here goes to stderr (the caller,
# display_problems(), redirects stdout for this call).
5892 if self._dynamic_config._circular_deps_for_display is not None:
5893 self._show_circular_deps(
5894 self._dynamic_config._circular_deps_for_display)
5896 # The user is only notified of a slot conflict if
5897 # there are no unresolvable blocker conflicts.
5898 if self._dynamic_config._unsatisfied_blockers_for_display is not None:
5899 self._show_unsatisfied_blockers(
5900 self._dynamic_config._unsatisfied_blockers_for_display)
5901 elif self._dynamic_config._slot_collision_info:
5902 self._show_slot_collision_notice()
5904 self._show_missed_update()
5906 self._display_autounmask()
5908 # TODO: Add generic support for "set problem" handlers so that
5909 # the below warnings aren't special cases for world only.
5911 if self._dynamic_config._missing_args:
5912 world_problems = False
5913 if "world" in self._dynamic_config.sets[
5914 self._frozen_config.target_root].sets:
5915 # Filter out indirect members of world (from nested sets)
5916 # since only direct members of world are desired here.
5917 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
5918 for arg, atom in self._dynamic_config._missing_args:
5919 if arg.name in ("selected", "world") and atom in world_set:
5920 world_problems = True
5924 sys.stderr.write("\n!!! Problems have been " + \
5925 "detected with your world file\n")
5926 sys.stderr.write("!!! Please run " + \
5927 green("emaint --check world")+"\n\n")
5929 if self._dynamic_config._missing_args:
5930 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5931 " Ebuilds for the following packages are either all\n")
5932 sys.stderr.write(colorize("BAD", "!!!") + \
5933 " masked or don't exist:\n")
5934 sys.stderr.write(" ".join(str(atom) for arg, atom in \
5935 self._dynamic_config._missing_args) + "\n")
# Arguments satisfied only via package.provided: group each (arg, atom)
# pair with the sets that pulled it in, then print a warning.
5937 if self._dynamic_config._pprovided_args:
5939 for arg, atom in self._dynamic_config._pprovided_args:
5940 if isinstance(arg, SetArg):
5942 arg_atom = (atom, atom)
5945 arg_atom = (arg.arg, atom)
5946 refs = arg_refs.setdefault(arg_atom, [])
5947 if parent not in refs:
5950 msg.append(bad("\nWARNING: "))
5951 if len(self._dynamic_config._pprovided_args) > 1:
5952 msg.append("Requested packages will not be " + \
5953 "merged because they are listed in\n")
5955 msg.append("A requested package will not be " + \
5956 "merged because it is listed in\n")
5957 msg.append("package.provided:\n\n")
5958 problems_sets = set()
5959 for (arg, atom), refs in arg_refs.items():
5962 problems_sets.update(refs)
5964 ref_string = ", ".join(["'%s'" % name for name in refs])
5965 ref_string = " pulled in by " + ref_string
5966 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
5968 if "selected" in problems_sets or "world" in problems_sets:
5969 msg.append("This problem can be solved in one of the following ways:\n\n")
5970 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
5971 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
5972 msg.append(" C) Remove offending entries from package.provided.\n\n")
5973 msg.append("The best course of action depends on the reason that an offending\n")
5974 msg.append("package.provided entry exists.\n\n")
5975 sys.stderr.write("".join(msg))
# Updates blocked only by LICENSE changes.
5977 masked_packages = []
5978 for pkg in self._dynamic_config._masked_license_updates:
5979 root_config = pkg.root_config
5980 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5981 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
5982 masked_packages.append((root_config, pkgsettings,
5983 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
5985 writemsg("\n" + colorize("BAD", "!!!") + \
5986 " The following updates are masked by LICENSE changes:\n",
5988 show_masked_packages(masked_packages)
5990 writemsg("\n", noiselevel=-1)
# Installed packages that are masked.
5992 masked_packages = []
5993 for pkg in self._dynamic_config._masked_installed:
5994 root_config = pkg.root_config
5995 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# NOTE(review): here the bound method self._pkg_use_enabled is passed
# as use=, while the parallel loop above passes the *result*
# self._pkg_use_enabled(pkg) — looks like a bug; confirm intent.
5996 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
5997 masked_packages.append((root_config, pkgsettings,
5998 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
6000 writemsg("\n" + colorize("BAD", "!!!") + \
6001 " The following installed packages are masked:\n",
6003 show_masked_packages(masked_packages)
6005 writemsg("\n", noiselevel=-1)
6007 def saveNomergeFavorites(self):
6008 """Find atoms in favorites that are not in the mergelist and add them
6009 to the world file if necessary."""
# Bail out for ephemeral operations that must not touch the world file.
6010 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
6011 "--oneshot", "--onlydeps", "--pretend"):
6012 if x in self._frozen_config.myopts:
6014 root_config = self._frozen_config.roots[self._frozen_config.target_root]
6015 world_set = root_config.sets["selected"]
# Lock the world file if the set supports it; the lock/unlock
# try/finally is not visible in this extract.
6017 world_locked = False
6018 if hasattr(world_set, "lock"):
6022 if hasattr(world_set, "load"):
6023 world_set.load() # maybe it's changed on disk
6025 args_set = self._dynamic_config.sets[
6026 self._frozen_config.target_root].sets['__non_set_args__']
6027 portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
6028 added_favorites = set()
# Collect world atoms for "nomerge" nodes that came from arguments.
6029 for x in self._dynamic_config._set_nodes:
6030 if x.operation != "nomerge":
6033 if x.root != root_config.root:
6037 myfavkey = create_world_atom(x, args_set, root_config)
6039 if myfavkey in added_favorites:
6041 added_favorites.add(myfavkey)
6042 except portage.exception.InvalidDependString as e:
6043 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
6044 (x.cpv, e), noiselevel=-1)
6045 writemsg("!!! see '%s'\n\n" % os.path.join(
6046 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
# Also record any user-supplied sets that qualify as world candidates.
6049 for arg in self._dynamic_config._initial_arg_list:
6050 if not isinstance(arg, SetArg):
6052 if arg.root_config.root != root_config.root:
6055 if k in ("selected", "world") or \
6056 not root_config.sets[k].world_candidate:
6061 all_added.append(SETPREFIX + k)
6062 all_added.extend(added_favorites)
6065 writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
6066 colorize("INFORM", str(a)), noiselevel=-1)
6068 world_set.update(all_added)
6073 def _loadResumeCommand(self, resume_data, skip_masked=True,
# Rebuild the depgraph from a saved resume list (mtimedb "resume"
# entry), validating each entry as it is added.
6076 Add a resume command to the graph and validate it in the process. This
6077 will raise a PackageNotFound exception if a package is not available.
# Validate the overall shape of the resume data before using it.
6082 if not isinstance(resume_data, dict):
6085 mergelist = resume_data.get("mergelist")
6086 if not isinstance(mergelist, list):
6089 favorites = resume_data.get("favorites")
6090 args_set = self._dynamic_config.sets[
6091 self._frozen_config.target_root].sets['__non_set_args__']
6092 if isinstance(favorites, list):
6093 args = self._load_favorites(favorites)
6097 fakedb = self._dynamic_config.mydbapi
6098 trees = self._frozen_config.trees
6099 serialized_tasks = []
# Each mergelist entry is [pkg_type, root, cpv, action].
6102 if not (isinstance(x, list) and len(x) == 4):
6104 pkg_type, myroot, pkg_key, action = x
6105 if pkg_type not in self.pkg_tree_map:
6107 if action != "merge":
6109 root_config = self._frozen_config.roots[myroot]
6111 # Use the resume "favorites" list to see if a repo was specified
6113 depgraph_sets = self._dynamic_config.sets[root_config.root]
6115 for atom in depgraph_sets.atoms.getAtoms():
6116 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
6120 atom = "=" + pkg_key
6122 atom = atom + _repo_separator + repo
6125 atom = Atom(atom, allow_repo=True)
# Pick the first matching, non-excluded, visible package.
6130 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
6131 if not self._pkg_visibility_check(pkg) or \
6132 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
6133 modified_use=self._pkg_use_enabled(pkg)):
6138 # It does not exist or it is corrupt.
6140 # TODO: log these somewhere
6142 raise portage.exception.PackageNotFound(pkg_key)
6144 if "merge" == pkg.operation and \
6145 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
6146 modified_use=self._pkg_use_enabled(pkg)):
6149 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
6151 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6153 self._dynamic_config._unsatisfied_deps_for_display.append(
6154 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6156 fakedb[myroot].cpv_inject(pkg)
6157 serialized_tasks.append(pkg)
6158 self._spinner_update()
6160 if self._dynamic_config._unsatisfied_deps_for_display:
6163 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
6164 self._dynamic_config._serialized_tasks_cache = serialized_tasks
6165 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
6167 self._select_package = self._select_pkg_from_graph
6168 self._dynamic_config.myparams["selective"] = True
6169 # Always traverse deep dependencies in order to account for
6170 # potentially unsatisfied dependencies of installed packages.
6171 # This is necessary for correct --keep-going or --resume operation
6172 # in case a package from a group of circularly dependent packages
6173 # fails. In this case, a package which has recently been installed
6174 # may have an unsatisfied circular dependency (pulled in by
6175 # PDEPEND, for example). So, even though a package is already
6176 # installed, it may not have all of it's dependencies satisfied, so
6177 # it may not be usable. If such a package is in the subgraph of
6178 # deep depenedencies of a scheduled build, that build needs to
6179 # be cancelled. In order for this type of situation to be
6180 # recognized, deep traversal of dependencies is required.
6181 self._dynamic_config.myparams["deep"] = True
6183 for task in serialized_tasks:
6184 if isinstance(task, Package) and \
6185 task.operation == "merge":
6186 if not self._add_pkg(task, None):
6189 # Packages for argument atoms need to be explicitly
6190 # added via _add_pkg() so that they are included in the
6191 # digraph (needed at least for --tree display).
6192 for arg in self._expand_set_args(args, add_to_digraph=True):
6193 for atom in arg.pset.getAtoms():
6194 pkg, existing_node = self._select_package(
6195 arg.root_config.root, atom)
6196 if existing_node is None and \
6198 if not self._add_pkg(pkg, Dependency(atom=atom,
6199 root=pkg.root, parent=arg)):
6202 # Allow unsatisfied deps here to avoid showing a masking
6203 # message for an unsatisfied dep that isn't necessarily
6205 if not self._create_graph(allow_unsatisfied=True):
# Only deps of packages actually scheduled for merge invalidate
# the resume list directly.
6208 unsatisfied_deps = []
6209 for dep in self._dynamic_config._unsatisfied_deps:
6210 if not isinstance(dep.parent, Package):
6212 if dep.parent.operation == "merge":
6213 unsatisfied_deps.append(dep)
6216 # For unsatisfied deps of installed packages, only account for
6217 # them if they are in the subgraph of dependencies of a package
6218 # which is scheduled to be installed.
6219 unsatisfied_install = False
6221 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
# Walk ancestors of the dep's parent looking for a scheduled merge.
6223 node = dep_stack.pop()
6224 if not isinstance(node, Package):
6226 if node.operation == "merge":
6227 unsatisfied_install = True
6229 if node in traversed:
6232 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
6234 if unsatisfied_install:
6235 unsatisfied_deps.append(dep)
6237 if masked_tasks or unsatisfied_deps:
6238 # This probably means that a required package
6239 # was dropped via --skipfirst. It makes the
6240 # resume list invalid, so convert it to a
6241 # UnsatisfiedResumeDep exception.
6242 raise self.UnsatisfiedResumeDep(self,
6243 masked_tasks + unsatisfied_deps)
6244 self._dynamic_config._serialized_tasks_cache = None
6247 except self._unknown_internal_error:
6252 def _load_favorites(self, favorites):
# Recreate DependencyArg instances (SetArg/AtomArg) from a saved
# favorites list so resumed Packages can be matched to arguments.
6254 Use a list of favorites to resume state from a
6255 previous select_files() call. This creates similar
6256 DependencyArg instances to those that would have
6257 been created by the original select_files() call.
6258 This allows Package instances to be matched with
6259 DependencyArg instances during graph creation.
6261 root_config = self._frozen_config.roots[self._frozen_config.target_root]
6262 sets = root_config.sets
6263 depgraph_sets = self._dynamic_config.sets[root_config.root]
# NOTE(review): basestring is Python-2-only; this file predates a
# Python-3 port.
6266 if not isinstance(x, basestring):
6268 if x in ("system", "world"):
# SETPREFIX-prefixed entries name package sets.
6270 if x.startswith(SETPREFIX):
6271 s = x[len(SETPREFIX):]
6274 if s in depgraph_sets.sets:
6277 depgraph_sets.sets[s] = pset
6278 args.append(SetArg(arg=x, pset=pset,
6279 root_config=root_config))
# Everything else is treated as a plain atom.
6282 x = Atom(x, allow_repo=True)
6283 except portage.exception.InvalidAtom:
6285 args.append(AtomArg(arg=x, atom=x,
6286 root_config=root_config))
6288 self._set_args(args)
6291 class UnsatisfiedResumeDep(portage.exception.PortageException):
# Docstring delimiters are not visible in this extract; the text below
# is the class docstring.
6293 A dependency of a resume list is not installed. This
6294 can occur when a required package is dropped from the
6295 merge list via --skipfirst.
# `value` is passed to PortageException; the raising depgraph instance
# is kept so callers can inspect its state.
6297 def __init__(self, depgraph, value):
6298 portage.exception.PortageException.__init__(self, value)
6299 self.depgraph = depgraph
# Base class for exceptions used internally by the depgraph (see the
# subclasses defined below).
6301 class _internal_exception(portage.exception.PortageException):
6302 def __init__(self, value=""):
6303 portage.exception.PortageException.__init__(self, value)
6305 class _unknown_internal_error(_internal_exception):
# Docstring delimiters are not visible in this extract.
6307 Used by the depgraph internally to terminate graph creation.
6308 The specific reason for the failure should have been dumped
6309 to stderr, unfortunately, the exact reason for the failure
6313 class _serialize_tasks_retry(_internal_exception):
# Docstring delimiters are not visible in this extract.
6315 This is raised by the _serialize_tasks() method when it needs to
6316 be called again for some reason. The only case that it's currently
6317 used for is when neglected dependencies need to be added to the
6318 graph in order to avoid making a potentially unsafe decision.
6321 class _backtrack_mask(_internal_exception):
# Docstring delimiters are not visible in this extract.
6323 This is raised by _show_unsatisfied_dep() when it's called with
6324 check_backtrack=True and a matching package has been masked by
def need_restart(self):
	"""Return whether a backtracking restart is both requested and allowed."""
	dynamic = self._dynamic_config
	return dynamic._need_restart and not dynamic._skip_restart
def success_without_autounmask(self):
	"""Report whether the last resolution succeeded with no autounmask changes."""
	result = self._dynamic_config._success_without_autounmask
	return result
def get_backtrack_infos(self):
	"""Expose the backtracking information gathered during graph creation."""
	infos = self._dynamic_config._backtrack_infos
	return infos
6339 class _dep_check_composite_db(dbapi):
# Docstring delimiters are not visible in this extract.
6341 A dbapi-like interface that is optimized for use in dep_check() calls.
6342 This is built on top of the existing depgraph package selection logic.
6343 Some packages that have been added to the graph may be masked from this
6344 view in order to influence the atom preference selection that occurs
6347 def __init__(self, depgraph, root):
6348 dbapi.__init__(self)
6349 self._depgraph = depgraph
# Per-atom match results and cpv -> Package lookups are memoized.
6351 self._match_cache = {}
6352 self._cpv_pkg_map = {}
6354 def _clear_cache(self):
# Invalidate memoized results (called when graph state changes).
6355 self._match_cache.clear()
6356 self._cpv_pkg_map.clear()
6358 def cp_list(self, cp):
# Minimal cp_list emulation: at most one cpv is returned, which is
# sufficient for new-style virtual existence checks.
6360 Emulate cp_list just so it can be used to check for existence
6361 of new-style virtuals. Since it's a waste of time to return
6362 more than one cpv for this use case, a maximum of one cpv will
6365 if isinstance(cp, Atom):
6370 for pkg in self._depgraph._iter_match_pkgs_any(
6371 self._depgraph._frozen_config.roots[self._root], atom):
6378 def match(self, atom):
# Return matching cpvs for atom, consulting the memo cache first.
6379 ret = self._match_cache.get(atom)
6382 pkg, existing = self._depgraph._select_package(self._root, atom)
6386 # Return the highest available from select_package() as well as
6387 # any matching slots in the graph db.
6389 slots.add(pkg.metadata["SLOT"])
6390 if pkg.cp.startswith("virtual/"):
6391 # For new-style virtual lookahead that occurs inside
6392 # dep_check(), examine all slots. This is needed
6393 # so that newer slots will not unnecessarily be pulled in
6394 # when a satisfying lower slot is already installed. For
6395 # example, if virtual/jdk-1.4 is satisfied via kaffe then
6396 # there's no need to pull in a newer slot to satisfy a
6397 # virtual/jdk dependency.
6398 for db, pkg_type, built, installed, db_keys in \
6399 self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
6400 for cpv in db.match(atom):
6401 if portage.cpv_getkey(cpv) != pkg.cp:
6403 slots.add(db.aux_get(cpv, ["SLOT"])[0])
6405 if self._visible(pkg):
6406 self._cpv_pkg_map[pkg.cpv] = pkg
# Resolve the remaining slots to visible packages, one per slot.
6408 slots.remove(pkg.metadata["SLOT"])
6410 slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
6411 pkg, existing = self._depgraph._select_package(
6412 self._root, slot_atom)
6415 if not self._visible(pkg):
6417 self._cpv_pkg_map[pkg.cpv] = pkg
6420 self._cpv_sort_ascending(ret)
6421 self._match_cache[atom] = ret
6424 def _visible(self, pkg):
# Decide whether pkg should be exposed through this composite view.
6425 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
6427 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
6428 except (StopIteration, portage.exception.InvalidDependString):
6432 if pkg.installed and \
6433 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
6434 # Account for packages with masks (like KEYWORDS masks)
6435 # that are usually ignored in visibility checks for
6436 # installed packages, in order to handle cases like
6438 myopts = self._depgraph._frozen_config.myopts
6439 use_ebuild_visibility = myopts.get(
6440 '--use-ebuild-visibility', 'n') != 'n'
6441 avoid_update = "--update" not in myopts and \
6442 "remove" not in self._depgraph._dynamic_config.myparams
6443 usepkgonly = "--usepkgonly" in myopts
6444 if not avoid_update:
6445 if not use_ebuild_visibility and usepkgonly:
6449 pkg_eb = self._depgraph._pkg(
6450 pkg.cpv, "ebuild", pkg.root_config,
6452 except portage.exception.PackageNotFound:
# Fall back to scanning matching ebuilds for any visible one.
6453 pkg_eb_visible = False
6454 for pkg_eb in self._depgraph._iter_match_pkgs(
6455 pkg.root_config, "ebuild",
6456 Atom("=%s" % (pkg.cpv,))):
6457 if self._depgraph._pkg_visibility_check(pkg_eb):
6458 pkg_eb_visible = True
6460 if not pkg_eb_visible:
6463 if not self._depgraph._pkg_visibility_check(pkg_eb):
6466 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
6467 self._root].get(pkg.slot_atom)
6468 if in_graph is None:
6469 # Mask choices for packages which are not the highest visible
6470 # version within their slot (since they usually trigger slot
6472 highest_visible, in_graph = self._depgraph._select_package(
6473 self._root, pkg.slot_atom)
6474 # Note: highest_visible is not necessarily the real highest
6475 # visible, especially when --update is not enabled, so use
6476 # < operator instead of !=.
6477 if pkg < highest_visible:
6479 elif in_graph != pkg:
6480 # Mask choices for packages that would trigger a slot
6481 # conflict with a previously selected package.
6485 def aux_get(self, cpv, wants):
# cpv must have been returned by match() so it is in _cpv_pkg_map.
6486 metadata = self._cpv_pkg_map[cpv].metadata
6487 return [metadata.get(x, "") for x in wants]
6489 def match_pkgs(self, atom):
# Like match(), but returns Package instances instead of cpvs.
6490 return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
6492 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
# Inform the user that short name `arg` matched several categories.
# With --quiet just list the fully-qualified names; otherwise run a
# search and show one result per candidate category/package.
6494 if "--quiet" in myopts:
6495 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
6496 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
6497 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
6498 writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
6501 s = search(root_config, spinner, "--searchdesc" in myopts,
6502 "--quiet" not in myopts, "--usepkg" in myopts,
6503 "--usepkgonly" in myopts)
6504 null_cp = portage.dep_getkey(insert_category_into_atom(
6506 cat, atom_pn = portage.catsplit(null_cp)
6507 s.searchkey = atom_pn
6508 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
6511 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
6512 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def insert_category_into_atom(atom, category):
	"""Insert a category prefix into a category-less atom string.

	The first word character in `atom` marks where the package name
	begins (leading operator characters such as '=', '>=' or '~' are
	skipped), and "<category>/" is inserted immediately before it.

	@param atom: an atom string without a category part
	@param category: the category name to insert
	@returns: the prefixed atom string, or None if `atom` contains
		no word characters at all
	"""
	# The visible extract was truncated here: the no-match branch and
	# the return statement were missing, so the function is completed
	# with the conventional None fallback.
	alphanum = re.search(r'\w', atom)
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
6523 def _spinner_start(spinner, myopts):
# Print the "These are the packages that would be ..." banner when a
# package list is about to be displayed, and start (or silence) the
# "Calculating dependencies" spinner.
6526 if "--quiet" not in myopts and \
6527 ("--pretend" in myopts or "--ask" in myopts or \
6528 "--tree" in myopts or "--verbose" in myopts):
# `action` describes the pending operation (merged/fetched/built);
# some assignments are outside this extract.
6530 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
6532 elif "--buildpkgonly" in myopts:
6536 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
6537 if "--unordered-display" in myopts:
6538 portage.writemsg_stdout("\n" + \
6539 darkgreen("These are the packages that " + \
6540 "would be %s:" % action) + "\n\n")
6542 portage.writemsg_stdout("\n" + \
6543 darkgreen("These are the packages that " + \
6544 "would be %s, in reverse order:" % action) + "\n\n")
6546 portage.writemsg_stdout("\n" + \
6547 darkgreen("These are the packages that " + \
6548 "would be %s, in order:" % action) + "\n\n")
6550 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
6551 if not show_spinner:
6552 spinner.update = spinner.update_quiet
6555 portage.writemsg_stdout("Calculating dependencies ")
6557 def _spinner_stop(spinner):
# Finish the spinner output started by _spinner_start(); a no-op when
# the spinner is absent or was silenced.
6558 if spinner is None or \
6559 spinner.update == spinner.update_quiet:
6562 if spinner.update != spinner.update_basic:
6563 # update_basic is used for non-tty output,
6564 # so don't output backspaces in that case.
6565 portage.writemsg_stdout("\b\b")
6567 portage.writemsg_stdout("... done!\n")
6569 def backtrack_depgraph(settings, trees, myopts, myparams,
6570 myaction, myfiles, spinner):
# Public wrapper around _backtrack_depgraph() that manages spinner
# start/stop; the try/finally lines are not visible in this extract.
6572 Raises PackageSetNotFound if myfiles contains a missing package set.
6574 _spinner_start(spinner, myopts)
6576 return _backtrack_depgraph(settings, trees, myopts, myparams,
6577 myaction, myfiles, spinner)
6579 _spinner_stop(spinner)
6582 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
# Repeatedly build a depgraph, feeding backtracking parameters from
# failed attempts into a Backtracker, until success or retries run out.
6584 max_retries = myopts.get('--backtrack', 10)
# NOTE(review): under Python 3 the `/ 2` here is true division and
# yields a float — confirm intended semantics before any port.
6585 max_depth = max(1, (max_retries + 1) / 2)
6586 allow_backtracking = max_retries > 0
6587 backtracker = Backtracker(max_depth)
6590 frozen_config = _frozen_depgraph_config(settings, trees,
# Retry loop: each iteration uses the next parameter set proposed by
# the backtracker.
6594 backtrack_parameters = backtracker.get()
6596 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
6597 frozen_config=frozen_config,
6598 allow_backtracking=allow_backtracking,
6599 backtrack_parameters=backtrack_parameters)
6600 success, favorites = mydepgraph.select_files(myfiles)
6602 if success or mydepgraph.success_without_autounmask():
6604 elif not allow_backtracking:
6606 elif backtracked >= max_retries:
6608 elif mydepgraph.need_restart():
6610 backtracker.feedback(mydepgraph.get_backtrack_infos())
# After exhausting retries, fall back to the best run recorded by the
# backtracker, with backtracking disabled.
6614 if not (success or mydepgraph.success_without_autounmask()) and backtracked:
6616 if "--debug" in myopts:
6618 "\n\nbacktracking aborted after %s tries\n\n" % \
6619 backtracked, noiselevel=-1, level=logging.DEBUG)
6621 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
6622 frozen_config=frozen_config,
6623 allow_backtracking=False,
6624 backtrack_parameters=backtracker.get_best_run())
6625 success, favorites = mydepgraph.select_files(myfiles)
6627 return (success, mydepgraph, favorites)
6630 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Public wrapper around _resume_depgraph() that manages spinner
# start/stop; the try/finally lines are not visible in this extract.
6632 Raises PackageSetNotFound if myfiles contains a missing package set.
6634 _spinner_start(spinner, myopts)
6636 return _resume_depgraph(settings, trees, mtimedb, myopts,
6639 _spinner_stop(spinner)
6641 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Build a depgraph from the saved resume list, dropping unsatisfiable
# entries (and their dependent parents) when skip_unsatisfied is set.
6643 Construct a depgraph for the given resume list. This will raise
6644 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
6645 TODO: Return reasons for dropped_tasks, for display/logging.
6647 @returns: (success, depgraph, dropped_tasks)
6650 skip_unsatisfied = True
6651 mergelist = mtimedb["resume"]["mergelist"]
6652 dropped_tasks = set()
6653 frozen_config = _frozen_depgraph_config(settings, trees,
# Retry loop: on UnsatisfiedResumeDep, prune the mergelist and rebuild.
6656 mydepgraph = depgraph(settings, trees,
6657 myopts, myparams, spinner, frozen_config=frozen_config)
6659 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
6660 skip_masked=skip_masked)
6661 except depgraph.UnsatisfiedResumeDep as e:
6662 if not skip_unsatisfied:
6665 graph = mydepgraph._dynamic_config.digraph
6666 unsatisfied_parents = dict((dep.parent, dep.parent) \
6668 traversed_nodes = set()
6669 unsatisfied_stack = list(unsatisfied_parents)
# Propagate: removing a package may break its parents' deps too.
6670 while unsatisfied_stack:
6671 pkg = unsatisfied_stack.pop()
6672 if pkg in traversed_nodes:
6674 traversed_nodes.add(pkg)
6676 # If this package was pulled in by a parent
6677 # package scheduled for merge, removing this
6678 # package may cause the parent package's
6679 # dependency to become unsatisfied.
6680 for parent_node in graph.parent_nodes(pkg):
6681 if not isinstance(parent_node, Package) \
6682 or parent_node.operation not in ("merge", "nomerge"):
6685 graph.child_nodes(parent_node,
6686 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
6687 if pkg in unsatisfied:
6688 unsatisfied_parents[parent_node] = parent_node
6689 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied packages.
6691 pruned_mergelist = []
6693 if isinstance(x, list) and \
6694 tuple(x) not in unsatisfied_parents:
6695 pruned_mergelist.append(x)
6697 # If the mergelist doesn't shrink then this loop is infinite.
6698 if len(pruned_mergelist) == len(mergelist):
6699 # This happens if a package can't be dropped because
6700 # it's already installed, but it has unsatisfied PDEPEND.
6702 mergelist[:] = pruned_mergelist
6704 # Exclude installed packages that have been removed from the graph due
6705 # to failure to build/install runtime dependencies after the dependent
6706 # package has already been installed.
6707 dropped_tasks.update(pkg for pkg in \
6708 unsatisfied_parents if pkg.operation != "nomerge")
6710 del e, graph, traversed_nodes, \
6711 unsatisfied_parents, unsatisfied_stack
6715 return (success, mydepgraph, dropped_tasks)
6717 def get_mask_info(root_config, cpv, pkgsettings,
6718 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
# Fetch metadata for cpv from db and compute its mask reasons.
# Returns (metadata, mreasons); metadata is None when aux_get fails
# (the failure handling lines are not visible in this extract).
6721 metadata = dict(zip(db_keys,
6722 db.aux_get(cpv, db_keys, myrepo=myrepo)))
6726 if metadata is None:
6727 mreasons = ["corruption"]
6729 eapi = metadata['EAPI']
6732 if not portage.eapi_is_supported(eapi):
6733 mreasons = ['EAPI %s' % eapi]
6735 pkg = Package(type_name=pkg_type, root_config=root_config,
6736 cpv=cpv, built=built, installed=installed, metadata=metadata)
# Optionally take autounmask USE changes into account.
6739 if _pkg_use_enabled is not None:
6740 modified_use = _pkg_use_enabled(pkg)
6742 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
6744 return metadata, mreasons
6746 def show_masked_packages(masked_packages):
# Print one "- cpv (masked by: ...)" line per package, with any
# package.mask comments and license file locations shown once each.
# Returns True when at least one package is masked by unsupported EAPI.
6747 shown_licenses = set()
6748 shown_comments = set()
6749 # Maybe there is both an ebuild and a binary. Only
6750 # show one of them to avoid redundant appearance.
6752 have_eapi_mask = False
6753 for (root_config, pkgsettings, cpv, repo,
6754 metadata, mreasons) in masked_packages:
6757 output_cpv += _repo_separator + repo
6758 if output_cpv in shown_cpvs:
6760 shown_cpvs.add(output_cpv)
6761 comment, filename = None, None
6762 if "package.mask" in mreasons:
6763 comment, filename = \
6764 portage.getmaskingreason(
6765 cpv, metadata=metadata,
6766 settings=pkgsettings,
6767 portdb=root_config.trees["porttree"].dbapi,
6768 return_location=True)
6769 missing_licenses = []
# License lookups are skipped for unsupported EAPIs since metadata
# cannot be trusted in that case.
6771 if not portage.eapi_is_supported(metadata["EAPI"]):
6772 have_eapi_mask = True
6774 missing_licenses = \
6775 pkgsettings._getMissingLicenses(
6777 except portage.exception.InvalidDependString:
6778 # This will have already been reported
6779 # above via mreasons.
6782 writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
6784 if comment and comment not in shown_comments:
6785 writemsg_stdout(filename + ":\n" + comment + "\n",
6787 shown_comments.add(comment)
6788 portdb = root_config.trees["porttree"].dbapi
6789 for l in missing_licenses:
6790 l_path = portdb.findLicensePath(l)
6791 if l in shown_licenses:
6793 msg = ("A copy of the '%s' license" + \
6794 " is located at '%s'.\n\n") % (l, l_path)
6795 writemsg_stdout(msg, noiselevel=-1)
6796 shown_licenses.add(l)
6797 return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation covering masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	)
	for doc_line in doc_lines:
		writemsg_stdout(doc_line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a pointer to the handbook section about blocked packages."""
	intro = ("\nFor more information about " + bad("Blocked Packages") +
		", please refer to the following\n")
	writemsg(intro, noiselevel=-1)
	writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
	writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg as a list."""
	messages = []
	for reason in _get_masking_status(pkg, pkgsettings,
		root_config, myrepo=myrepo, use=use):
		messages.append(reason.message)
	return messages
6812 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
# Collect _MaskReason objects for pkg: standard masking status plus
# CHOST, invalid-metadata and missing-SLOT reasons.
6813 mreasons = _getmaskingstatus(
6814 pkg, settings=pkgsettings,
6815 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
# CHOST acceptance only matters for packages not yet installed.
6817 if not pkg.installed:
6818 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
6819 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
6820 pkg.metadata["CHOST"]))
# Report each invalid-metadata message individually.
6823 for msg_type, msgs in pkg.invalid.items():
6826 _MaskReason("invalid", "invalid: %s" % (msg,)))
6828 if not pkg.metadata["SLOT"]:
6830 _MaskReason("invalid", "SLOT: undefined"))