1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
14 from collections import deque
15 from itertools import chain
18 from portage import os, OrderedDict
19 from portage import _unicode_decode, _unicode_encode, _encodings
20 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
21 from portage.dbapi import dbapi
22 from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
23 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
24 from portage.exception import InvalidAtom, InvalidDependString, PortageException
25 from portage.output import colorize, create_color_func, \
27 bad = create_color_func("BAD")
28 from portage.package.ebuild.getmaskingstatus import \
29 _getmaskingstatus, _MaskReason
30 from portage._sets import SETPREFIX
31 from portage._sets.base import InternalPackageSet
32 from portage.util import ConfigProtect, shlex_split, new_protect_filename
33 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
34 from portage.util import writemsg_level, write_atomic
35 from portage.util.digraph import digraph
36 from portage.versions import catpkgsplit
38 from _emerge.AtomArg import AtomArg
39 from _emerge.Blocker import Blocker
40 from _emerge.BlockerCache import BlockerCache
41 from _emerge.BlockerDepPriority import BlockerDepPriority
42 from _emerge.countdown import countdown
43 from _emerge.create_world_atom import create_world_atom
44 from _emerge.Dependency import Dependency
45 from _emerge.DependencyArg import DependencyArg
46 from _emerge.DepPriority import DepPriority
47 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
48 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
49 from _emerge.FakeVartree import FakeVartree
50 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
51 from _emerge.is_valid_package_atom import is_valid_package_atom
52 from _emerge.Package import Package
53 from _emerge.PackageArg import PackageArg
54 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
55 from _emerge.RootConfig import RootConfig
56 from _emerge.search import search
57 from _emerge.SetArg import SetArg
58 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
59 from _emerge.UnmergeDepPriority import UnmergeDepPriority
60 from _emerge.UseFlagDisplay import pkg_use_display
61 from _emerge.userquery import userquery
63 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
64 from _emerge.resolver.slot_collision import slot_conflict_handler
65 from _emerge.resolver.circular_dependency import circular_dependency_handler
66 from _emerge.resolver.output import Display
68 if sys.hexversion >= 0x3000000:
72 class _scheduler_graph_config(object):
73 def __init__(self, trees, pkg_cache, graph, mergelist):
75 self.pkg_cache = pkg_cache
77 self.mergelist = mergelist
def _wildcard_set(atoms):
	"""Build an InternalPackageSet from whitespace-split option tokens.

	Each token is parsed as a wildcard-capable Atom; a token that is not
	a valid atom by itself (e.g. a bare package name) is retried with a
	"*/" category wildcard prepended.

	Fix: the listing was missing the iteration, the try header, the
	pkgs.add() call and the return; restored so the function actually
	populates and returns the set.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for x in atoms:
		try:
			x = Atom(x, allow_wildcard=True)
		except portage.exception.InvalidAtom:
			x = Atom("*/" + x, allow_wildcard=True)
		pkgs.add(x)
	return pkgs
class _frozen_depgraph_config(object):
	"""State that is shared across (backtracking) depgraph runs: settings,
	trees with the real vartree replaced by a FakeVartree, and atom sets
	derived from command-line options.
	NOTE(review): this listing is missing interior lines (loop headers,
	some assignments); only the visible text is reproduced below.
	"""

	def __init__(self, settings, trees, myopts, spinner):
		self.settings = settings
		self.target_root = settings["ROOT"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = frozenset(["--buildpkgonly",
			"--fetchonly", "--fetch-all-uri", "--pretend"])
		self.pkgsettings = {}
		self._trees_orig = trees
		# All Package instances
		self._highest_license_masked = {}
		# NOTE(review): the "for myroot in trees:" header is not visible here.
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# FakeVartree stands in for the real vartree so the graph can
			# simulate installs without touching the live vdb.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot])
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)

		self._required_set_names = set(["world"])

		# Wildcard atom sets built from list-valued emerge options.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	"""Per-root registry of package sets and atoms pulled into the graph.

	Fix: the listing showed bare ``self.`` assignments at class level with
	no enclosing method and no creation of ``self.sets``; restored the
	``__init__`` wrapper and the ``self.sets = {}`` initialization that
	the '__non_set_args__' assignment requires.
	"""
	def __init__(self):
		# contains all sets added to the graph
		self.sets = {}
		# contains non-set atoms given as arguments
		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks which packages must be rebuilt or reinstalled to honor the
	--rebuild-if-new-rev/--rebuild-if-new-ver/--rebuild-if-unbuilt options.
	NOTE(review): this listing is missing interior lines (returns, some
	loop headers); only the visible text is reproduced below.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any rebuild-trigger option is active.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		# Record an edge from dep_pkg to its built parent when the dep is
		# build-time or run-time and neither end is excluded/ignored by
		# --rebuild-exclude / --rebuild-ignore.
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and (priority.buildtime or priority.runtime) and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:

	def _trigger_rebuild(self, parent, build_deps, runtime_deps):
		# Decide whether `parent` must be rebuilt/reinstalled based on its
		# children that are both build-time and run-time dependencies.
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
		trees = self._frozen_config.trees
		children = set(build_deps).intersection(runtime_deps)

		for slot_atom in children:
			kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
				dep_root_slot = (dep_pkg.root, slot_atom)
				if self._needs_rebuild(dep_pkg):
					self.rebuild_list.add(root_slot)
				elif ("--usepkg" in self._frozen_config.myopts and
					(dep_root_slot in self.reinstall_list or
					dep_root_slot in self.rebuild_list or
					not dep_pkg.installed)):

					# A direct rebuild dependency is being installed. We
					# should update the parent as well to the latest binary,
					# if that binary is valid.
					#
					# To validate the binary, we check whether all of the
					# rebuild dependencies are present on the same binhost.
					#
					# 1) If parent is present on the binhost, but one of its
					# rebuild dependencies is not, then the parent should
					# be rebuilt from source.
					# 2) Otherwise, the parent binary is assumed to be valid,
					# because all of its rebuild dependencies are

					bintree = trees[parent.root]["bintree"]
					uri = bintree.get_pkgindex_uri(parent.cpv)
					dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
					bindb = bintree.dbapi
					if self.rebuild_if_new_ver and uri and uri != dep_uri:
						cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
						for cpv in bindb.match(dep_pkg.slot_atom):
							if cpv_norev == catpkgsplit(cpv)[:-1]:
								dep_uri = bintree.get_pkgindex_uri(cpv)

					if uri and uri != dep_uri:
						# 1) Remote binary package is invalid because it was
						# built without dep_pkg. Force rebuild.
						self.rebuild_list.add(root_slot)
					elif (parent.installed and
						root_slot not in self.reinstall_list):
						inst_build_time = parent.metadata.get("BUILD_TIME")
							bin_build_time, = bindb.aux_get(parent.cpv,
						if bin_build_time != inst_build_time:
							# 2) Remote binary package is valid, and local package
							# is not up to date. Force reinstall.
							self.reinstall_list.add(root_slot)

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		leaf_nodes = deque(graph.leaf_nodes())

		def ignore_non_runtime(priority):
			return not priority.runtime

		def ignore_non_buildtime(priority):
			return not priority.buildtime

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while not graph.empty():
				# We're interested in intersection of buildtime and runtime,
				# so ignore edges that do not contain both.
				leaf_nodes.extend(graph.leaf_nodes(
					ignore_priority=ignore_non_runtime))
					leaf_nodes.extend(graph.leaf_nodes(
						ignore_priority=ignore_non_buildtime))
						# We'll have to drop an edge that is both
						# buildtime and runtime. This should be
						leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.nodes[node][1].items()
			node_build_deps = build_deps.get(node, {})
			node_runtime_deps = runtime_deps.get(node, {})
			for parent, priorities in parents:
					# Ignore a direct cycle.
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_rdeps = runtime_deps.setdefault(parent, {})
				for priority in priorities:
					if priority.buildtime:
						parent_bdeps[slot_atom] = node
						parent_rdeps[slot_atom] = node
				if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
					parent_rdeps.update(node_runtime_deps)
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, build_deps and runtime_deps will be
			# completely filled in, and self.rebuild_list / self.reinstall_list
			# will tell us whether any of our children need to be rebuilt or
			if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
class _dynamic_depgraph_config(object):
	"""Mutable per-run depgraph state: the graphs, caches, conflict and
	backtracking bookkeeping for a single resolution attempt.
	NOTE(review): this listing is missing interior lines; only the
	visible text is reproduced below.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}

		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		#contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This use used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}

		# Backtracking state carried over from the previous run, if any.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._traverse_ignored_deps = False

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			self._slot_pkg_map[myroot] = {}
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			self.mydbapi[myroot] = fakedb
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]

			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))
				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
	"""Dependency graph resolver (only part of this class is visible in
	this view)."""

	pkg_tree_map = RootConfig.pkg_tree_map

	# Metadata keys whose values are parsed for dependency atoms.
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
		# NOTE(review): the default backtrack_parameters=BacktrackParameter()
		# is evaluated once at definition time and shared by all calls that
		# omit it — safe only if BacktrackParameter is never mutated; confirm.
		if frozen_config is None:
			frozen_config = _frozen_depgraph_config(settings, trees,
			# NOTE(review): the continuation line of the call above is
			# missing from this view.
		self._frozen_config = frozen_config
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, backtrack_parameters)
		self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

		# Selection strategy: prefer the highest available candidates.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
	# NOTE(review): the "def _load_vdb(self):" line and the docstring
	# quotes are missing from this view; the lines below are the method's
	# visible remnant.
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.

		if self._dynamic_config._vdb_loaded:

		for myroot in self._frozen_config.trees:

			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts

			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.

				# FakeVartree.sync() populates virtuals, and we want
				# self.pkgsettings to have them populated too.
				self._frozen_config.pkgsettings[myroot] = \
					portage.config(clone=fake_vartree.settings)

			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi
				fakedb = self._dynamic_config._graph_trees[
					myroot]["vartree"].dbapi

				# NOTE(review): the loop header over installed packages is
				# missing from this view.
					self._spinner_update()
					# This triggers metadata updates via FakeVartree.
					vardb.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

		self._dynamic_config._vdb_loaded = True
561 def _spinner_update(self):
562 if self._frozen_config.spinner:
563 self._frozen_config.spinner.update()
	def _show_missed_update(self):
		# Report updates that were masked at runtime (e.g. by backtracking),
		# grouped by mask type and delegated to the type-specific display
		# helpers below.
		# NOTE(review): interior lines are missing from this view.

		# In order to minimize noise, show only the highest
		# missed update from each SLOT.
		for pkg, mask_reasons in \
			self._dynamic_config._runtime_pkg_mask.items():
				# Exclude installed here since we only
				# want to show available updates.
			k = (pkg.root, pkg.slot_atom)
			if k in missed_updates:
				other_pkg, mask_type, parent_atoms = missed_updates[k]
			for mask_type, parent_atoms in mask_reasons.items():
				missed_updates[k] = (pkg, mask_type, parent_atoms)

		if not missed_updates:

		missed_update_types = {}
		for pkg, mask_type, parent_atoms in missed_updates.values():
			missed_update_types.setdefault(mask_type,
				[]).append((pkg, parent_atoms))

		# In quiet (non-debug) mode these two noisy categories are dropped.
		if '--quiet' in self._frozen_config.myopts and \
			'--debug' not in self._frozen_config.myopts:
			missed_update_types.pop("slot conflict", None)
			missed_update_types.pop("missing dependency", None)

		self._show_missed_update_slot_conflicts(
			missed_update_types.get("slot conflict"))

		self._show_missed_update_unsatisfied_dep(
			missed_update_types.get("missing dependency"))
	def _show_missed_update_unsatisfied_dep(self, missed_updates):
		# Print each skipped update together with the unsatisfied deps that
		# caused it; updates masked by backtracking are collected and shown
		# in abbreviated form at the end.
		# NOTE(review): interior lines are missing from this view.

		if not missed_updates:

		backtrack_masked = []

		for pkg, parent_atoms in missed_updates:

			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent,
					check_backtrack=True)
			except self._backtrack_mask:
				# This is displayed below in abbreviated form.
				backtrack_masked.append((pkg, parent_atoms))

			writemsg("\n!!! The following update has been skipped " + \
				"due to unsatisfied dependencies:\n\n", noiselevel=-1)

			writemsg(str(pkg.slot_atom), noiselevel=-1)
				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
			writemsg("\n", noiselevel=-1)

			for parent, root, atom in parent_atoms:
				self._show_unsatisfied_dep(root, atom, myparent=parent)
				writemsg("\n", noiselevel=-1)

			# These are shown in abbreviated form, in order to avoid terminal
			# flooding from mask messages as reported in bug #285832.
			writemsg("\n!!! The following update(s) have been skipped " + \
				"due to unsatisfied dependencies\n" + \
				"!!! triggered by backtracking:\n\n", noiselevel=-1)
			for pkg, parent_atoms in backtrack_masked:
				writemsg(str(pkg.slot_atom), noiselevel=-1)
					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
				writemsg("\n", noiselevel=-1)
	def _show_missed_update_slot_conflicts(self, missed_updates):
		# Print updates skipped due to slot/dependency conflicts, listing
		# the parent atoms each skipped package conflicts with.
		# NOTE(review): interior lines are missing from this view.

		if not missed_updates:

		msg.append("\n!!! One or more updates have been skipped due to " + \
			"a dependency conflict:\n\n")

		for pkg, parent_atoms in missed_updates:
			msg.append(str(pkg.slot_atom))
				msg.append(" for %s" % (pkg.root,))

			for parent, atom in parent_atoms:

				msg.append(" conflicts with\n")

				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))

		writemsg("".join(msg), noiselevel=-1)
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases.
		"""
		# NOTE(review): interior lines are missing from this view.

		if not self._dynamic_config._slot_collision_info:

		self._show_merge_list()

		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
		handler = self._dynamic_config._slot_conflict_handler

		conflict = handler.get_conflict()
		writemsg(conflict, noiselevel=-1)

		explanation = handler.get_explanation()
			writemsg(explanation, noiselevel=-1)

		if "--quiet" in self._frozen_config.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")
		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
		# Suggest a larger --backtrack value only when backtracking is off
		# and the configured value is small.
		if not self._dynamic_config._allow_backtracking and \
			(backtrack_opt is None or \
			(backtrack_opt > 0 and backtrack_opt < 30)):
			msg.append(" You may want to try a larger value of the ")
			msg.append("--backtrack option, such as --backtrack=30, ")
			msg.append("in order to see if that will solve this conflict ")
			msg.append("automatically.")

		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for line in textwrap.wrap(''.join(msg), 70):
			writemsg(line + '\n', noiselevel=-1)
		writemsg('\n', noiselevel=-1)
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		# NOTE(review): interior lines are missing from this view.
		for (slot_atom, root), slot_nodes \
			in self._dynamic_config._slot_collision_info.items():

			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
				if parent_atoms is None:
					self._dynamic_config._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,), allow_repo=True)
					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
						parent_atoms.add(parent_atom)
						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		# NOTE(review): the return statements are missing from this view.
		if "--newuse" in self._frozen_config.myopts or \
			"--binpkg-respect-use" in self._frozen_config.myopts:
			# Flags added/removed from IUSE (minus forced flags), plus flags
			# whose effective enabled state changed.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		# Drain the dependency stacks, adding packages/deps to the graph
		# until both are empty.
		# NOTE(review): the "if dep_stack:" header and the return/continue
		# lines are missing from this view.
		dep_stack = self._dynamic_config._dep_stack
		dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
		while dep_stack or dep_disjunctive_stack:
			self._spinner_update()
				dep = dep_stack.pop()
				if isinstance(dep, Package):
					if not self._add_pkg_deps(dep,
						allow_unsatisfied=allow_unsatisfied):
				if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
			if dep_disjunctive_stack:
				if not self._pop_disjunction(allow_unsatisfied):
	def _expand_set_args(self, input_args, add_to_digraph=False):
		"""
		Iterate over a list of DependencyArg instances and yield all
		instances given in the input together with additional SetArg
		instances that are generated from nested sets.
		@param input_args: An iterable of DependencyArg instances
		@type input_args: Iterable
		@param add_to_digraph: If True then add SetArg instances
			to the digraph, in order to record parent -> child
			relationships from nested sets
		@type add_to_digraph: Boolean
		@returns: All args given in the input together with additional
			SetArg instances that are generated from nested sets
		"""
		# NOTE(review): interior lines (yield/continue, while header,
		# add_to_digraph check) are missing from this view.

		traversed_set_args = set()

		for arg in input_args:
			if not isinstance(arg, SetArg):

			root_config = arg.root_config
			depgraph_sets = self._dynamic_config.sets[root_config.root]
				arg = arg_stack.pop()
				if arg in traversed_set_args:
				traversed_set_args.add(arg)

				# Traverse nested sets and add them to the stack
				# if they're not already in the graph. Also, graph
				# edges between parent and nested sets.
				for token in arg.pset.getNonAtoms():
					if not token.startswith(SETPREFIX):
					s = token[len(SETPREFIX):]
					nested_set = depgraph_sets.sets.get(s)
					if nested_set is None:
						nested_set = root_config.sets.get(s)
					if nested_set is not None:
						nested_arg = SetArg(arg=token, pset=nested_set,
							root_config=root_config)
						arg_stack.append(nested_arg)
							self._dynamic_config.digraph.add(nested_arg, arg,
								priority=BlockerDepPriority.instance)
							depgraph_sets.sets[nested_arg.name] = nested_arg.pset
	def _add_dep(self, dep, allow_unsatisfied=False):
		# Resolve a single Dependency: handle blockers, select a package
		# for the atom, record unsatisfied deps, and trigger backtracking
		# when a required dependency cannot be satisfied.
		# NOTE(review): interior lines (blocker guard, returns, some call
		# continuations) are missing from this view.
		debug = "--debug" in self._frozen_config.myopts
		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
		nodeps = "--nodeps" in self._frozen_config.myopts
		deep = self._dynamic_config.myparams.get("deep", 0)
		recurse = deep is True or dep.depth <= deep

			if not buildpkgonly and \
				not dep.collapsed_priority.ignored and \
				not dep.collapsed_priority.optional and \
				dep.parent not in self._dynamic_config._slot_collision_nodes:
				if dep.parent.onlydeps:
					# It's safe to ignore blockers if the
					# parent is an --onlydeps node.

				# The blocker applies to the root where
				# the parent is or will be installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.metadata["EAPI"],
					priority=dep.priority, root=dep.parent.root)
				self._dynamic_config._blocker_parents.add(blocker, dep.parent)

		if dep.child is None:
			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
				onlydeps=dep.onlydeps)
			# The caller has selected a specific package
			# via self._minimize_packages().
			existing_node = self._dynamic_config._slot_pkg_map[
				dep.root].get(dep_pkg.slot_atom)

			if (dep.collapsed_priority.optional or
				dep.collapsed_priority.ignored):
				# This is an unnecessary build-time dep.
			if allow_unsatisfied:
				self._dynamic_config._unsatisfied_deps.append(dep)
			self._dynamic_config._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))

			# The parent node should not already be in
			# runtime_pkg_mask, since that would trigger an
			# infinite backtracking loop.
			if self._dynamic_config._allow_backtracking:
				if dep.parent in self._dynamic_config._runtime_pkg_mask:
					if "--debug" in self._frozen_config.myopts:
							"!!! backtracking loop detected: %s %s\n" % \
							self._dynamic_config._runtime_pkg_mask[
							dep.parent]), noiselevel=-1)
				elif not self.need_restart():
					# Do not backtrack if only USE have to be changed in
					# order to satisfy the dependency.
					dep_pkg, existing_node = \
						self._select_package(dep.root, dep.atom.without_use,
							onlydeps=dep.onlydeps)
						self._dynamic_config._backtrack_infos["missing dependency"] = dep
						self._dynamic_config._need_restart = True
						if "--debug" in self._frozen_config.myopts:
							msg.append("backtracking due to unsatisfied dep:")
							msg.append(" parent: %s" % dep.parent)
							msg.append(" priority: %s" % dep.priority)
							msg.append(" root: %s" % dep.root)
							msg.append(" atom: %s" % dep.atom)
							writemsg_level("".join("%s\n" % l for l in msg),
								noiselevel=-1, level=logging.DEBUG)

		self._rebuild.add(dep_pkg, dep)

		ignore = dep.collapsed_priority.ignored and \
			not self._dynamic_config._traverse_ignored_deps
		if not ignore and not self._add_pkg(dep_pkg, dep):
def _check_slot_conflict(self, pkg, atom):
    """Return (existing_node, matches): the package (if any) already
    occupying pkg's slot in the graph, and whether pkg/atom agree with
    that existing selection.

    NOTE(review): a couple of lines are elided in this excerpt (the
    'if existing_node:' guard and part of the inner condition), so the
    body below is syntactically incomplete as shown.
    """
    existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
    matches = pkg.cpv == existing_node.cpv
    if pkg != existing_node and \
        # Use package set for matching since it will match via
        # PROVIDE when necessary, while match_from_list does not.
        matches = bool(InternalPackageSet(initial_atoms=(atom,),
            allow_repo=True).findAtomForPackage(existing_node,
            modified_use=self._pkg_use_enabled(existing_node)))

    return (existing_node, matches)
def _add_pkg(self, pkg, dep):
    """Add a Package node and its parent edges to the dependency graph,
    detecting slot conflicts and scheduling backtracking when needed.

    NOTE(review): this excerpt elides many source lines ('try:' openers,
    'else:' branches, returns, docstring delimiters and parts of
    multi-line calls), so the structure shown here is incomplete —
    confirm against the full file before relying on it.
    """
    myparent = dep.parent
    priority = dep.priority
    # Fallback priority (the 'if priority is None:' guard appears elided).
    priority = DepPriority()
    # NOTE(review): the lines below look like a legacy docstring whose
    # quote delimiters were elided from this excerpt.
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph
    # select the correct /var database that we'll be checking against
    vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[pkg.root]
    # NOTE(review): 'try:' opener elided before the next line.
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString as e:
        if not pkg.installed:
            # should have been masked before it was selected
    # NOTE: REQUIRED_USE checks are delayed until after
    # package selection, since we want to prompt the user
    # for USE adjustment rather than have REQUIRED_USE
    # affect package selection and || dep choices.
    if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
        eapi_has_required_use(pkg.metadata["EAPI"]):
        required_use_is_sat = check_required_use(
            pkg.metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag)
        if not required_use_is_sat:
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)
            # Report with an exact "=" atom for this cpv.
            atom = Atom("=" + pkg.cpv)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                ((pkg.root, atom), {"myparent":dep.parent}))
            self._dynamic_config._skip_restart = True
    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self._dynamic_config.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True
        existing_node, existing_node_matches = \
            self._check_slot_conflict(pkg, dep.atom)
        slot_collision = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                # NOTE(review): trailing arguments of this call elided.
                self._dynamic_config.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                # NOTE(review): trailing arguments of this call elided.
                self._dynamic_config.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
            # A slot conflict has occurred.
            # The existing node should not already be in
            # runtime_pkg_mask, since that would trigger an
            # infinite backtracking loop.
            if self._dynamic_config._allow_backtracking and \
                self._dynamic_config._runtime_pkg_mask:
                if "--debug" in self._frozen_config.myopts:
                    # NOTE(review): writemsg(...) call opener elided.
                    "!!! backtracking loop detected: %s %s\n" % \
                    self._dynamic_config._runtime_pkg_mask[
                        existing_node]), noiselevel=-1)
            elif self._dynamic_config._allow_backtracking and \
                not self._accept_blocker_conflicts() and \
                not self.need_restart():
                self._add_slot_conflict(pkg)
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(pkg, (dep.parent, dep.atom))
                for parent_atom in arg_atoms:
                    parent, atom = parent_atom
                    self._add_parent_atom(pkg, parent_atom)
                self._process_slot_conflicts()
                # The ordering of backtrack_data can make
                # a difference here, because both mask actions may lead
                # to valid, but different, solutions and the one with
                # 'existing_node' masked is usually the better one. Because
                # of that, we choose an order such that
                # the backtracker will first explore the choice with
                # existing_node masked. The backtracker reverses the
                # order, so the order it uses is the reverse of the
                # order shown here. See bug #339606.
                for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
                    # For missed update messages, find out which
                    # atoms matched to_be_selected that did not
                    # match to_be_masked.
                    # NOTE(review): 'parent_atoms =' LHS elided.
                    self._dynamic_config._parent_atoms.get(to_be_selected, set())
                    conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
                    parent_atoms = conflict_atoms
                    all_parents.update(parent_atoms)
                    for parent, atom in parent_atoms:
                        # NOTE(review): trailing arguments elided.
                        i = InternalPackageSet(initial_atoms=(atom,),
                        if not i.findAtomForPackage(to_be_masked):
                    if to_be_selected >= to_be_masked:
                        # We only care about the parent atoms
                        # when they trigger a downgrade.
                        parent_atoms = set()
                    fallback_data.append((to_be_masked, parent_atoms))
                    # 'to_be_masked' does not violate any parent atom, which means
                    # there is no point in masking it.
                    backtrack_data.append((to_be_masked, parent_atoms))
                if not backtrack_data:
                    # This shouldn't happen, but fall back to the old
                    # behavior if this gets triggered somehow.
                    backtrack_data = fallback_data
                if len(backtrack_data) > 1:
                    # NOTE: Generally, we prefer to mask the higher
                    # version since this solves common cases in which a
                    # lower version is needed so that all dependencies
                    # will be satisfied (bug #337178). However, if
                    # existing_node happens to be installed then we
                    # mask that since this is a common case that is
                    # triggered when --update is not enabled.
                    if existing_node.installed:
                    elif pkg > existing_node:
                        backtrack_data.reverse()
                to_be_masked = backtrack_data[-1][0]
                self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
                self._dynamic_config._need_restart = True
                if "--debug" in self._frozen_config.myopts:
                    # NOTE(review): 'msg = []' initializer elided.
                    msg.append("backtracking due to slot conflict:")
                    if backtrack_data is fallback_data:
                        msg.append("!!! backtrack_data fallback")
                    msg.append(" first package: %s" % existing_node)
                    msg.append(" second package: %s" % pkg)
                    msg.append(" package to mask: %s" % to_be_masked)
                    msg.append(" slot: %s" % pkg.slot_atom)
                    msg.append(" parents: %s" % ", ".join( \
                        "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
                    writemsg_level("".join("%s\n" % l for l in msg),
                        noiselevel=-1, level=logging.DEBUG)
            # A slot collision has occurred. Sometimes this coincides
            # with unresolvable blockers, so the slot collision will be
            # shown later if there are no unresolvable blockers.
            self._add_slot_conflict(pkg)
            slot_collision = True
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self._dynamic_config.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        elif not previously_added:
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)
        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
    self._dynamic_config._set_nodes.add(pkg)
    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))
    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self._dynamic_config.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)
    # NOTE(review): the closing delimiter of this docstring is elided.
    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    deep = self._dynamic_config.myparams.get("deep", 0)
    recurse = deep is True or depth + 1 <= deep
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
    elif pkg.installed and not recurse:
        dep_stack = self._dynamic_config._ignored_deps
    self._spinner_update()
    if not previously_added:
        dep_stack.append(pkg)
1269 def _check_masks(self, pkg):
1271 slot_key = (pkg.root, pkg.slot_atom)
1273 # Check for upgrades in the same slot that are
1274 # masked due to a LICENSE change in a newer
1275 # version that is not masked for any other reason.
1276 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1277 if other_pkg is not None and pkg < other_pkg:
1278 self._dynamic_config._masked_license_updates.add(other_pkg)
1280 def _add_parent_atom(self, pkg, parent_atom):
1281 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1282 if parent_atoms is None:
1283 parent_atoms = set()
1284 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1285 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
    """Record pkg as a slot-conflict node and group it with the package
    already occupying the same (slot_atom, root) slot.

    NOTE(review): two lines appear elided in this excerpt — the
    'slot_nodes = set()' initializer inside the 'if' and a final
    'slot_nodes.add(pkg)' — so the body below is incomplete as shown.
    """
    self._dynamic_config._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """Expand pkg's DEPEND/RDEPEND/PDEPEND metadata into dep strings
    with appropriate roots/priorities and feed each one to
    _add_pkg_dep_string().

    NOTE(review): several source lines are elided in this excerpt
    (e.g. the 'myroot'/'edepend'/'deps' initializers, 'try:' openers
    and returns), so the body below is incomplete as shown.
    """
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = self._pkg_use_enabled(pkg)
    depth = pkg.depth + 1
    removal_action = "remove" in self._dynamic_config.myparams
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # NOTE(review): the surrounding 'for k in depkeys:' loop is elided.
    edepend[k] = metadata[k]
    # For --buildpkgonly without "deep", only build-time deps matter.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            ignore_build_time_deps = True
    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        ignore_build_time_deps = True
    depend_root = myroot
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            ignore_build_time_deps = True
    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if ignore_build_time_deps and \
        not self._rebuild.rebuild:
        edepend["DEPEND"] = ""
    # (root, dep-string, priority) triples; the 'deps = (' opener
    # appears elided in this excerpt.
    (depend_root, edepend["DEPEND"],
        self._priority(buildtime=True,
        optional=(pkg.built or ignore_build_time_deps),
        ignored=ignore_build_time_deps)),
    (myroot, edepend["RDEPEND"],
        self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True))
    debug = "--debug" in self._frozen_config.myopts
    # Installed packages get lenient dep-string parsing.
    strict = mytype != "installed"
    for dep_root, dep_string, dep_priority in deps:
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)
        # NOTE(review): 'try:' opener elided before the next statement.
        dep_string = portage.dep.use_reduce(dep_string,
            uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg))
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)
            # should have been masked before it was selected
        dep_string = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True)
        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
    self._dynamic_config._traversed_pkg_deps.add(pkg)
1428 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1430 _autounmask_backup = self._dynamic_config._autounmask
1431 if dep_priority.optional or dep_priority.ignored:
1432 # Temporarily disable autounmask for deps that
1433 # don't necessarily need to be satisfied.
1434 self._dynamic_config._autounmask = False
1436 return self._wrapped_add_pkg_dep_string(
1437 pkg, dep_root, dep_priority, dep_string,
1440 self._dynamic_config._autounmask = _autounmask_backup
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
    dep_string, allow_unsatisfied):
    """Select atoms satisfying dep_string, build Dependency objects for
    them (including indirect GLEP 37 virtual deps) and add each via
    _add_dep()/_add_pkg().

    NOTE(review): many source lines are elided in this excerpt ('try:'
    openers, 'if debug:' guards, returns and continue statements), so
    the control flow shown below is incomplete.
    """
    depth = pkg.depth + 1
    deep = self._dynamic_config.myparams.get("deep", 0)
    # deep is True means unlimited recursion depth.
    recurse_satisfied = deep is True or depth <= deep
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"
    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)
    # NOTE(review): 'try:' opener elided before the next statement.
    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString as e:
        self._dynamic_config._masked_installed.add(pkg)
        # should have been masked before it was selected
    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)
    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    traversed_virt_pkgs = set()
    reinstall_atoms = self._frozen_config.reinstall_atoms
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):
        # If this was a specially generated virtual atom
        # from dep_check, map it back to the original, in
        # order to avoid distortion in places like display
        # or conflict resolution code.
        is_virt = hasattr(atom, '_orig_atom')
        atom = getattr(atom, '_orig_atom', atom)
        if atom.blocker and \
            (dep_priority.optional or dep_priority.ignored):
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.
        mypriority = dep_priority.copy()
        if not atom.blocker:
            root_slot = (pkg.root, pkg.slot_atom)
            # Installed matches, excluding any the user asked to reinstall.
            inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom)
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                    modified_use=self._pkg_use_enabled(inst_pkg))]
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]
        dep = Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root)
        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if not atom.blocker and \
            not recurse_satisfied and \
            mypriority.satisfied and \
            mypriority.satisfied.visible and \
            dep.child is not None and \
            not dep.child.installed and \
            self._dynamic_config._slot_pkg_map[dep.child.root].get(
                dep.child.slot_atom) is None:
            if dep.root == self._frozen_config.target_root:
                # NOTE(review): 'try:' opener elided here.
                myarg = next(self._iter_atoms_for_pkg(dep.child))
                except StopIteration:
                except InvalidDependString:
                    if not dep.child.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
            self._dynamic_config._ignored_deps.append(dep)
        if dep_priority.ignored and \
            not self._dynamic_config._traverse_ignored_deps:
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)
            self._dynamic_config._ignored_deps.append(dep)
        if not self._add_dep(dep,
            allow_unsatisfied=allow_unsatisfied):
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)
    selected_atoms.pop(pkg)
    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_dep, atoms in selected_atoms.items():
        virt_pkg = virt_dep.child
        if virt_pkg not in traversed_virt_pkgs:
            writemsg_level("Candidates: %s: %s\n" % \
                (virt_pkg.cpv, [str(x) for x in atoms]),
                noiselevel=-1, level=logging.DEBUG)
        if not dep_priority.ignored or \
            self._dynamic_config._traverse_ignored_deps:
            if not self._add_pkg(virt_pkg, virt_dep):
        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):
            # If this was a specially generated virtual atom
            # from dep_check, map it back to the original, in
            # order to avoid distortion in places like display
            # or conflict resolution code.
            is_virt = hasattr(atom, '_orig_atom')
            atom = getattr(atom, '_orig_atom', atom)
            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = [inst_pkg for inst_pkg in vardb.match_pkgs(atom)
                    if not reinstall_atoms.findAtomForPackage(inst_pkg,
                        modified_use=self._pkg_use_enabled(inst_pkg))]
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]
            # Dependencies of virtuals are considered to have the
            # same depth as the virtual itself.
            dep = Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_dep.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root,
                collapsed_parent=pkg, collapsed_priority=dep_priority)
            if not atom.blocker and \
                not recurse_satisfied and \
                mypriority.satisfied and \
                mypriority.satisfied.visible and \
                dep.child is not None and \
                not dep.child.installed and \
                self._dynamic_config._slot_pkg_map[dep.child.root].get(
                    dep.child.slot_atom) is None:
                if dep.root == self._frozen_config.target_root:
                    # NOTE(review): 'try:' opener elided here.
                    myarg = next(self._iter_atoms_for_pkg(dep.child))
                    except StopIteration:
                    except InvalidDependString:
                        if not dep.child.installed:
                self._dynamic_config._ignored_deps.append(dep)
            if dep_priority.ignored and \
                not self._dynamic_config._traverse_ignored_deps:
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)
                self._dynamic_config._ignored_deps.append(dep)
            if not self._add_dep(dep,
                allow_unsatisfied=allow_unsatisfied):
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)
    writemsg_level("Exiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """Yield (atom, selected_pkg) pairs for the given atoms while
    eliminating redundant package selections.

    NOTE(review): docstring delimiters and several body lines (loop
    openers, initializers like 'atom_pkg_map', 'cp_atoms',
    'conflict_atoms'/'normal_atoms') are elided in this excerpt.
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
    for cp, pkgs in cp_pkg_map.items():
        for atom in pkg_atom_map[pkg]:
    # Use a digraph to identify and eliminate any
    # redundant package selections.
    atom_pkg_graph = digraph()
    for atom in pkg_atom_map[pkg1]:
        atom_pkg_graph.add(pkg1, atom)
        # NOTE(review): trailing arguments of this call elided.
        atom_set = InternalPackageSet(initial_atoms=(atom,),
        if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
            atom_pkg_graph.add(pkg2, atom)
    # A package is redundant if every atom it satisfies is also
    # satisfied by some other selected package.
    eliminate_pkg = True
    for atom in atom_pkg_graph.parent_nodes(pkg):
        if len(atom_pkg_graph.child_nodes(atom)) < 2:
            eliminate_pkg = False
    atom_pkg_graph.remove(pkg)
    # Yield ~, =*, < and <= atoms first, since those are more likely to
    # cause slot conflicts, and we want those atoms to be displayed
    # in the resulting slot conflict message (see bug #291142).
    for atom in cp_atoms:
        for child_pkg in atom_pkg_graph.child_nodes(atom):
            existing_node, matches = \
                self._check_slot_conflict(child_pkg, atom)
            if existing_node and not matches:
                conflict_atoms.append(atom)
            normal_atoms.append(atom)
    for atom in chain(conflict_atoms, normal_atoms):
        child_pkgs = atom_pkg_graph.child_nodes(atom)
        # if more than one child, yield highest version
        if len(child_pkgs) > 1:
            yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """Walk a use_reduce()d dep structure: queue disjunctive (|| and
    virtual/) parts via _queue_disjunction() and yield the rest.

    NOTE(review): docstring delimiters and several body lines (the
    'i = 0' initializer, 'x = dep_struct[i]', index increments and
    yields) are elided in this excerpt.
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    while i < len(dep_struct):
        if isinstance(x, list):
            # Recurse into nested lists of the dep structure.
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
        # "||" operator: queue the operator with its following group.
        self._queue_disjunction(pkg, dep_root, dep_priority,
            [ x, dep_struct[ i + 1 ] ] )
        # NOTE(review): 'try:' opener elided before the next statement.
        x = portage.dep.Atom(x)
        except portage.exception.InvalidAtom:
            if not pkg.installed:
                raise portage.exception.InvalidDependString(
                    "invalid atom: '%s'" % x)
        # Note: Eventually this will check for PROPERTIES=virtual
        # or whatever other metadata gets implemented for this
        if x.cp.startswith('virtual/'):
            self._queue_disjunction( pkg, dep_root,
                dep_priority, [ str(x) ] )
1788 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1789 self._dynamic_config._dep_disjunctive_stack.append(
1790 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
    """Pop one queued disjunctive dep and re-process it through
    _add_pkg_dep_string().

    NOTE(review): the docstring delimiters of the original docstring
    below and the trailing return are elided in this excerpt.
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    dep_string = portage.dep.paren_enclose(dep_struct,
        unevaluated_atom=True)
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
    """Construct a dep priority object: UnmergeDepPriority for removal
    actions, DepPriority otherwise.

    NOTE(review): the 'else:' line appears elided in this excerpt, so
    the second assignment below is shown at the wrong nesting level.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """Expand a category-less atom into all category-qualified atoms
    whose cp exists in the configured package databases.

    NOTE(review): the docstring delimiters of the original docstring
    below and a few lines ('categories'/'deps' initializers, the
    'categories.add(cat)' body and the final return) are elided.
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    # Probe with a dummy "null" category to isolate the package name.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat), allow_repo=True))
def _have_new_virt(self, root, atom_cp):
    """Check whether atom_cp is provided by any of root's filtered
    package databases (i.e. exists as a new-style package/virtual).

    NOTE(review): the result variable assignments and the return are
    elided in this excerpt.
    """
    for db, pkg_type, built, installed, db_keys in \
        self._dynamic_config._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """Yield (arg, atom) pairs for every command-line argument atom that
    matches pkg, skipping atoms redirected to new-style virtuals and
    atoms better satisfied by a higher version in another slot.

    NOTE(review): many source lines are elided in this excerpt
    ('continue'/'break' statements, the 'visible_pkgs =' LHS, the
    'higher_slot = None' initializer and yield statements), so the body
    below is incomplete as shown.
    """
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    root_config = self._frozen_config.roots[pkg.root]
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        # NOTE(review): 'visible_pkgs =' LHS elided on the next line.
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
1878 def select_files(self, myfiles):
1879 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1880 self._dynamic_config._initial_arg_list and call self._resolve to create the
1881 appropriate depgraph and return a favorite list."""
1883 debug = "--debug" in self._frozen_config.myopts
1884 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1885 sets = root_config.sets
1886 depgraph_sets = self._dynamic_config.sets[root_config.root]
1888 myroot = self._frozen_config.target_root
1889 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
1890 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
1891 real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
1892 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
1893 bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
1894 pkgsettings = self._frozen_config.pkgsettings[myroot]
1896 onlydeps = "--onlydeps" in self._frozen_config.myopts
1899 ext = os.path.splitext(x)[1]
1901 if not os.path.exists(x):
1903 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1904 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1905 elif os.path.exists(
1906 os.path.join(pkgsettings["PKGDIR"], x)):
1907 x = os.path.join(pkgsettings["PKGDIR"], x)
1909 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
1910 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1911 return 0, myfavorites
1912 mytbz2=portage.xpak.tbz2(x)
1913 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1914 if os.path.realpath(x) != \
1915 os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
1916 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
1917 self._dynamic_config._skip_restart = True
1918 return 0, myfavorites
1920 pkg = self._pkg(mykey, "binary", root_config,
1922 args.append(PackageArg(arg=x, package=pkg,
1923 root_config=root_config))
1924 elif ext==".ebuild":
1925 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1926 pkgdir = os.path.dirname(ebuild_path)
1927 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1928 cp = pkgdir[len(tree_root)+1:]
1929 e = portage.exception.PackageNotFound(
1930 ("%s is not in a valid portage tree " + \
1931 "hierarchy or does not exist") % x)
1932 if not portage.isvalidatom(cp):
1934 cat = portage.catsplit(cp)[0]
1935 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1936 if not portage.isvalidatom("="+mykey):
1938 ebuild_path = portdb.findname(mykey)
1940 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1941 cp, os.path.basename(ebuild_path)):
1942 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
1943 self._dynamic_config._skip_restart = True
1944 return 0, myfavorites
1945 if mykey not in portdb.xmatch(
1946 "match-visible", portage.cpv_getkey(mykey)):
1947 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
1948 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
1949 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
1950 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
1953 raise portage.exception.PackageNotFound(
1954 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1955 pkg = self._pkg(mykey, "ebuild", root_config,
1956 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
1957 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
1958 args.append(PackageArg(arg=x, package=pkg,
1959 root_config=root_config))
1960 elif x.startswith(os.path.sep):
1961 if not x.startswith(myroot):
1962 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1963 " $ROOT.\n") % x, noiselevel=-1)
1964 self._dynamic_config._skip_restart = True
1966 # Queue these up since it's most efficient to handle
1967 # multiple files in a single iter_owners() call.
1968 lookup_owners.append(x)
1969 elif x.startswith("." + os.sep) or \
1970 x.startswith(".." + os.sep):
1971 f = os.path.abspath(x)
1972 if not f.startswith(myroot):
1973 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
1974 " $ROOT.\n") % (f, x), noiselevel=-1)
1975 self._dynamic_config._skip_restart = True
1977 lookup_owners.append(f)
1979 if x in ("system", "world"):
1981 if x.startswith(SETPREFIX):
1982 s = x[len(SETPREFIX):]
1984 raise portage.exception.PackageSetNotFound(s)
1985 if s in depgraph_sets.sets:
1988 depgraph_sets.sets[s] = pset
1989 args.append(SetArg(arg=x, pset=pset,
1990 root_config=root_config))
1992 if not is_valid_package_atom(x, allow_repo=True):
1993 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1995 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1996 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1997 self._dynamic_config._skip_restart = True
1999 # Don't expand categories or old-style virtuals here unless
2000 # necessary. Expansion of old-style virtuals here causes at
2001 # least the following problems:
2002 # 1) It's more difficult to determine which set(s) an atom
2003 # came from, if any.
2004 # 2) It takes away freedom from the resolver to choose other
2005 # possible expansions when necessary.
2007 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2008 root_config=root_config))
2010 expanded_atoms = self._dep_expand(root_config, x)
2011 installed_cp_set = set()
2012 for atom in expanded_atoms:
2013 if vardb.cp_list(atom.cp):
2014 installed_cp_set.add(atom.cp)
2016 if len(installed_cp_set) > 1:
2017 non_virtual_cps = set()
2018 for atom_cp in installed_cp_set:
2019 if not atom_cp.startswith("virtual/"):
2020 non_virtual_cps.add(atom_cp)
2021 if len(non_virtual_cps) == 1:
2022 installed_cp_set = non_virtual_cps
2024 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2025 installed_cp = next(iter(installed_cp_set))
2026 for atom in expanded_atoms:
2027 if atom.cp == installed_cp:
2029 for pkg in self._iter_match_pkgs_any(
2030 root_config, atom.without_use,
2032 if not pkg.installed:
2036 expanded_atoms = [atom]
2039 # If a non-virtual package and one or more virtual packages
2040 # are in expanded_atoms, use the non-virtual package.
2041 if len(expanded_atoms) > 1:
2042 number_of_virtuals = 0
2043 for expanded_atom in expanded_atoms:
2044 if expanded_atom.cp.startswith("virtual/"):
2045 number_of_virtuals += 1
2047 candidate = expanded_atom
2048 if len(expanded_atoms) - number_of_virtuals == 1:
2049 expanded_atoms = [ candidate ]
2051 if len(expanded_atoms) > 1:
2052 writemsg("\n\n", noiselevel=-1)
2053 ambiguous_package_name(x, expanded_atoms, root_config,
2054 self._frozen_config.spinner, self._frozen_config.myopts)
2055 self._dynamic_config._skip_restart = True
2056 return False, myfavorites
2058 atom = expanded_atoms[0]
2060 null_atom = Atom(insert_category_into_atom(x, "null"),
2062 cat, atom_pn = portage.catsplit(null_atom.cp)
2063 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2065 # Allow the depgraph to choose which virtual.
2066 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
2071 args.append(AtomArg(arg=x, atom=atom,
2072 root_config=root_config))
2076 search_for_multiple = False
2077 if len(lookup_owners) > 1:
2078 search_for_multiple = True
2080 for x in lookup_owners:
2081 if not search_for_multiple and os.path.isdir(x):
2082 search_for_multiple = True
2083 relative_paths.append(x[len(myroot)-1:])
2086 for pkg, relative_path in \
2087 real_vardb._owners.iter_owners(relative_paths):
2088 owners.add(pkg.mycpv)
2089 if not search_for_multiple:
2093 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2094 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2095 self._dynamic_config._skip_restart = True
2099 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2101 # portage now masks packages with missing slot, but it's
2102 # possible that one was installed by an older version
2103 atom = Atom(portage.cpv_getkey(cpv))
2105 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2106 args.append(AtomArg(arg=atom, atom=atom,
2107 root_config=root_config))
2109 if "--update" in self._frozen_config.myopts:
2110 # In some cases, the greedy slots behavior can pull in a slot that
2111 # the user would want to uninstall due to it being blocked by a
2112 # newer version in a different slot. Therefore, it's necessary to
2113 # detect and discard any that should be uninstalled. Each time
2114 # that arguments are updated, package selections are repeated in
2115 # order to ensure consistency with the current arguments:
2117 # 1) Initialize args
2118 # 2) Select packages and generate initial greedy atoms
2119 # 3) Update args with greedy atoms
2120 # 4) Select packages and generate greedy atoms again, while
2121 # accounting for any blockers between selected packages
2122 # 5) Update args with revised greedy atoms
2124 self._set_args(args)
2127 greedy_args.append(arg)
2128 if not isinstance(arg, AtomArg):
2130 for atom in self._greedy_slots(arg.root_config, arg.atom):
2132 AtomArg(arg=arg.arg, atom=atom,
2133 root_config=arg.root_config))
2135 self._set_args(greedy_args)
2138 # Revise greedy atoms, accounting for any blockers
2139 # between selected packages.
2140 revised_greedy_args = []
2142 revised_greedy_args.append(arg)
2143 if not isinstance(arg, AtomArg):
2145 for atom in self._greedy_slots(arg.root_config, arg.atom,
2146 blocker_lookahead=True):
2147 revised_greedy_args.append(
2148 AtomArg(arg=arg.arg, atom=atom,
2149 root_config=arg.root_config))
2150 args = revised_greedy_args
2151 del revised_greedy_args
2153 self._set_args(args)
2155 myfavorites = set(myfavorites)
2157 if isinstance(arg, (AtomArg, PackageArg)):
2158 myfavorites.add(arg.atom)
2159 elif isinstance(arg, SetArg):
2160 myfavorites.add(arg.arg)
2161 myfavorites = list(myfavorites)
2164 portage.writemsg("\n", noiselevel=-1)
2165 # Order needs to be preserved since a feature of --nodeps
2166 # is to allow the user to force a specific merge order.
2167 self._dynamic_config._initial_arg_list = args[:]
2169 return self._resolve(myfavorites)
	def _resolve(self, myfavorites):
		"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
		call self._creategraph to process their deps and return
		a favorite list."""
		debug = "--debug" in self._frozen_config.myopts
		onlydeps = "--onlydeps" in self._frozen_config.myopts
		myroot = self._frozen_config.target_root
		pkgsettings = self._frozen_config.pkgsettings[myroot]
		pprovideddict = pkgsettings.pprovideddict
		virtuals = pkgsettings.getvirtuals()
		args = self._dynamic_config._initial_arg_list[:]
		# Inject forced rebuild/reinstall targets as extra AtomArgs so
		# they are pulled into the graph like normal arguments.
		for root, atom in chain(self._rebuild.rebuild_list,
			self._rebuild.reinstall_list):
			args.append(AtomArg(arg=atom, atom=atom,
				root_config=self._frozen_config.roots[root]))
		for arg in self._expand_set_args(args, add_to_digraph=True):
			for atom in arg.pset.getAtoms():
				self._spinner_update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
					# package.provided entries satisfy the atom directly;
					# record them for later reporting instead of merging.
					pprovided = pprovideddict.get(atom.cp)
					if pprovided and portage.match_from_list(atom, pprovided):
						# A provided package has been specified on the command line.
						self._dynamic_config._pprovided_args.append((arg, atom))
					if isinstance(arg, PackageArg):
						if not self._add_pkg(arg.package, dep) or \
							not self._create_graph():
							if not self.need_restart():
								sys.stderr.write(("\n\n!!! Problem " + \
									"resolving dependencies for %s\n") % \
							return 0, myfavorites
						portage.writemsg(" Arg: %s\n Atom: %s\n" % \
							(arg, atom), noiselevel=-1)
					pkg, existing_node = self._select_package(
						myroot, atom, onlydeps=onlydeps)
						# No package matched: try old-style virtual
						# expansion against package.provided entries.
						pprovided_match = False
						for virt_choice in virtuals.get(atom.cp, []):
							expanded_atom = portage.dep.Atom(
								atom.replace(atom.cp, virt_choice.cp, 1))
							pprovided = pprovideddict.get(expanded_atom.cp)
								portage.match_from_list(expanded_atom, pprovided):
								# A provided package has been
								# specified on the command line.
								self._dynamic_config._pprovided_args.append((arg, atom))
								pprovided_match = True
						if not (isinstance(arg, SetArg) and \
							arg.name in ("selected", "system", "world")):
							self._dynamic_config._unsatisfied_deps_for_display.append(
								((myroot, atom), {"myparent" : arg}))
							return 0, myfavorites
						self._dynamic_config._missing_args.append((arg, atom))
					if atom.cp != pkg.cp:
						# For old-style virtuals, we need to repeat the
						# package.provided check against the selected package.
						expanded_atom = atom.replace(atom.cp, pkg.cp)
						pprovided = pprovideddict.get(pkg.cp)
							portage.match_from_list(expanded_atom, pprovided):
							# A provided package has been
							# specified on the command line.
							self._dynamic_config._pprovided_args.append((arg, atom))
					if pkg.installed and "selective" not in self._dynamic_config.myparams:
						self._dynamic_config._unsatisfied_deps_for_display.append(
							((myroot, atom), {"myparent" : arg}))
						# Previous behavior was to bail out in this case, but
						# since the dep is satisfied by the installed package,
						# it's more friendly to continue building the graph
						# and just show a warning message. Therefore, only bail
						# out here if the atom is not from either the system or
						if not (isinstance(arg, SetArg) and \
							arg.name in ("selected", "system", "world")):
							return 0, myfavorites
					# Add the selected package to the graph as soon as possible
					# so that later dep_check() calls can use it as feedback
					# for making more consistent atom selections.
					if not self._add_pkg(pkg, dep):
						if self.need_restart():
						elif isinstance(arg, SetArg):
							writemsg(("\n\n!!! Problem resolving " + \
								"dependencies for %s from %s\n") % \
								(atom, arg.arg), noiselevel=-1)
							writemsg(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % \
								(atom,), noiselevel=-1)
						return 0, myfavorites
				except SystemExit as e:
					raise # Needed else can't exit
				except Exception as e:
					writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
					writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites
		except self._unknown_internal_error:
			return False, myfavorites
		# Any needed autounmask changes (keywords, mask, USE, license)
		# on graph packages mean the user must change the configuration,
		# so the resolve is reported as a failure.
		if set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_unstable_keywords) or \
			set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_p_mask_changes) or \
			set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_use_config_changes) or \
			set(self._dynamic_config.digraph).intersection( \
			self._dynamic_config._needed_license_changes) :
			#We failed if the user needs to change the configuration
			self._dynamic_config._success_without_autounmask = True
			return False, myfavorites
		# Pending rebuilds force a backtracking restart, with the
		# rebuild/reinstall lists recorded for the next pass.
		if self._rebuild.trigger_rebuilds():
			backtrack_infos = self._dynamic_config._backtrack_infos
			config = backtrack_infos.setdefault("config", {})
			config["rebuild_list"] = self._rebuild.rebuild_list
			config["reinstall_list"] = self._rebuild.reinstall_list
			self._dynamic_config._need_restart = True
			return False, myfavorites
		# We're true here unless we are missing binaries.
		return (True, myfavorites)
	def _set_args(self, args):
		"""
		Create the "__non_set_args__" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		# Reset all per-root argument state before repopulating it.
		for root in self._dynamic_config.sets:
			depgraph_sets = self._dynamic_config.sets[root]
			depgraph_sets.sets.setdefault('__non_set_args__',
				InternalPackageSet(allow_repo=True)).clear()
			depgraph_sets.atoms.clear()
			depgraph_sets.atom_arg_map.clear()
			set_atoms[root] = []
			non_set_atoms[root] = []
		# We don't add set args to the digraph here since that
		# happens at a later stage and we don't want to make
		# any state changes here that aren't reversed by a
		# another call to this method.
		for arg in self._expand_set_args(args, add_to_digraph=False):
			atom_arg_map = self._dynamic_config.sets[
				arg.root_config.root].atom_arg_map
			if isinstance(arg, SetArg):
				atom_group = set_atoms[arg.root_config.root]
				atom_group = non_set_atoms[arg.root_config.root]
			# Record every atom and map it back to the argument(s)
			# that pulled it in.
			for atom in arg.pset.getAtoms():
				atom_group.append(atom)
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
					atom_arg_map[atom_key] = refs
		# Publish the collected atoms into the per-root depgraph sets.
		for root in self._dynamic_config.sets:
			depgraph_sets = self._dynamic_config.sets[root]
			depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
				non_set_atoms.get(root, [])))
			depgraph_sets.sets['__non_set_args__'].update(
				non_set_atoms.get(root, []))
		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._dynamic_config._highest_pkg_cache.clear()
		for trees in self._dynamic_config._filtered_trees.values():
			trees["porttree"].dbapi._clear_cache()
	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
		"""
		Return a list of slot atoms corresponding to installed slots that
		differ from the slot of the highest visible match. When
		blocker_lookahead is True, slot atoms that would trigger a blocker
		conflict are automatically discarded, potentially allowing automatic
		uninstallation of older slots when appropriate.
		"""
		highest_pkg, in_graph = self._select_package(root_config.root, atom)
		if highest_pkg is None:
		vardb = root_config.trees["vartree"].dbapi
		# Collect installed SLOTs belonging to the same cp as the
		# highest visible match.
		for cpv in vardb.match(atom):
			# don't mix new virtuals with old virtuals
			if portage.cpv_getkey(cpv) == highest_pkg.cp:
				slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
		slots.add(highest_pkg.metadata["SLOT"])
		slots.remove(highest_pkg.metadata["SLOT"])
			# For each remaining slot, keep only same-cp candidates
			# that are older than the highest visible match.
			slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
			pkg, in_graph = self._select_package(root_config.root, slot_atom)
			if pkg is not None and \
				pkg.cp == highest_pkg.cp and pkg < highest_pkg:
				greedy_pkgs.append(pkg)
		if not blocker_lookahead:
			return [pkg.slot_atom for pkg in greedy_pkgs]
		# Gather each candidate's blocker atoms from its dep strings
		# so conflicting slots can be filtered out below.
		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
		for pkg in greedy_pkgs + [highest_pkg]:
			dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
				selected_atoms = self._select_atoms(
					pkg.root, dep_str, self._pkg_use_enabled(pkg),
					parent=pkg, strict=True)
			except portage.exception.InvalidDependString:
			for atoms in selected_atoms.values():
				blocker_atoms.extend(x for x in atoms if x.blocker)
			blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
		if highest_pkg not in blockers:
		# filter packages with invalid deps
		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
		# filter packages that conflict with highest_pkg
		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
			(blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
			blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
		# If two packages conflict, discard the lower version.
		discard_pkgs = set()
		greedy_pkgs.sort(reverse=True)
		for i in range(len(greedy_pkgs) - 1):
			pkg1 = greedy_pkgs[i]
			if pkg1 in discard_pkgs:
			for j in range(i + 1, len(greedy_pkgs)):
				pkg2 = greedy_pkgs[j]
				if pkg2 in discard_pkgs:
				if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
					blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
					discard_pkgs.add(pkg2)
		return [pkg.slot_atom for pkg in greedy_pkgs \
			if pkg not in discard_pkgs]
2452 def _select_atoms_from_graph(self, *pargs, **kwargs):
2454 Prefer atoms matching packages that have already been
2455 added to the graph or those that are installed and have
2456 not been scheduled for replacement.
2458 kwargs["trees"] = self._dynamic_config._graph_trees
2459 return self._select_atoms_highest_available(*pargs, **kwargs)
	def _select_atoms_highest_available(self, root, depstring,
		myuse=None, parent=None, strict=True, trees=None, priority=None):
		"""This will raise InvalidDependString if necessary. If trees is
		None then self._dynamic_config._filtered_trees is used."""
		pkgsettings = self._frozen_config.pkgsettings[root]
			trees = self._dynamic_config._filtered_trees
		mytrees = trees[root]
		atom_graph = digraph()
			# Temporarily disable autounmask so that || preferences
			# account for masking and USE settings.
			_autounmask_backup = self._dynamic_config._autounmask
			self._dynamic_config._autounmask = False
			mytrees["pkg_use_enabled"] = self._pkg_use_enabled
				# Pass parent/priority context to dep_check via the
				# trees dict; removed again below after the call.
				if parent is not None:
					trees[root]["parent"] = parent
					trees[root]["atom_graph"] = atom_graph
				if priority is not None:
					trees[root]["priority"] = priority
				mycheck = portage.dep_check(depstring, None,
					pkgsettings, myuse=myuse,
					myroot=root, trees=trees)
				# Restore autounmask and strip the temporary keys so
				# later dep_check calls see a clean trees dict.
				self._dynamic_config._autounmask = _autounmask_backup
				del mytrees["pkg_use_enabled"]
				if parent is not None:
					trees[root].pop("parent")
					trees[root].pop("atom_graph")
				if priority is not None:
					trees[root].pop("priority")
			raise portage.exception.InvalidDependString(mycheck[1])
			selected_atoms = mycheck[1]
		elif parent not in atom_graph:
			selected_atoms = {parent : mycheck[1]}
			# Recursively traversed virtual dependencies, and their
			# direct dependencies, are considered to have the same
			# depth as direct dependencies.
			if parent.depth is None:
				virt_depth = parent.depth + 1
			chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
			selected_atoms = OrderedDict()
			node_stack = [(parent, None, None)]
			traversed_nodes = set()
				# Depth-first walk over the atom graph produced by
				# dep_check, grouping chosen atoms by their parent dep.
				node, node_parent, parent_atom = node_stack.pop()
				traversed_nodes.add(node)
					if node_parent is parent:
						if priority is None:
							node_priority = None
							node_priority = priority.copy()
						# virtuals only have runtime deps
						node_priority = self._priority(runtime=True)
					k = Dependency(atom=parent_atom,
						blocker=parent_atom.blocker, child=node,
						depth=virt_depth, parent=node_parent,
						priority=node_priority, root=node.root)
				selected_atoms[k] = child_atoms
				for atom_node in atom_graph.child_nodes(node):
					child_atom = atom_node[0]
					if id(child_atom) not in chosen_atom_ids:
					child_atoms.append(child_atom)
					for child_node in atom_graph.child_nodes(atom_node):
						if child_node in traversed_nodes:
						if not portage.match_from_list(
							child_atom, [child_node]):
							# Typically this means that the atom
							# specifies USE deps that are unsatisfied
							# by the selected package. The caller will
							# record this as an unsatisfied dependency
						node_stack.append((child_node, node, child_atom))
		return selected_atoms
	def _expand_virt_from_graph(self, root, atom):
		"""
		Expand an atom that matches a virtual/* package in the graph
		into the atoms of that provider's RDEPEND (ignoring nested
		virtual atoms). NOTE(review): the elided early branches appear
		to fall back to the unexpanded atom -- confirm upstream.
		"""
		if not isinstance(atom, Atom):
		graphdb = self._dynamic_config.mydbapi[root]
		match = graphdb.match_pkgs(atom)
		if not pkg.cpv.startswith("virtual/"):
			# Resolve the virtual's runtime deps against the graph
			# trees so already-chosen providers are preferred.
			rdepend = self._select_atoms_from_graph(
				pkg.root, pkg.metadata.get("RDEPEND", ""),
				myuse=self._pkg_use_enabled(pkg),
				parent=pkg, strict=False)
		except InvalidDependString as e:
			writemsg_level("!!! Invalid RDEPEND in " + \
				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
				(pkg.root, pkg.cpv, e),
				noiselevel=-1, level=logging.ERROR)
		for atoms in rdepend.values():
				if hasattr(atom, "_orig_atom"):
					# Ignore virtual atoms since we're only
					# interested in expanding the real atoms.
	def _get_dep_chain(self, start_node, target_atom=None,
		unsatisfied_dependency=False):
		"""
		Returns a list of (atom, node_type) pairs that represent a dep chain.
		If target_atom is None, the first package shown is pkg's parent.
		If target_atom is not None the first package shown is pkg.
		If unsatisfied_dependency is True, the first parent selected is one
		whose dependency is not satisfied by 'pkg'. This is needed for USE
		changes. (Does not support target_atom.)
		"""
		traversed_nodes = set()
		all_parents = self._dynamic_config._parent_atoms
		# When a target atom is given, start the chain with the node
		# itself, annotated with the USE flags that affect the atom.
		if target_atom is not None and isinstance(node, Package):
			affecting_use = set()
			for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
					affecting_use.update(extract_affecting_use(
						node.metadata[dep_str], target_atom))
				except InvalidDependString:
					if not node.installed:
			affecting_use.difference_update(node.use.mask, node.use.force)
			pkg_name = _unicode_decode("%s") % (node.cpv,)
				for flag in affecting_use:
					if flag in self._pkg_use_enabled(node):
						usedep.append("-"+flag)
				pkg_name += "[%s]" % ",".join(usedep)
			dep_chain.append((pkg_name, node.type_name))
		# Walk upward through the digraph, one parent per iteration.
		while node is not None:
			traversed_nodes.add(node)
			if isinstance(node, DependencyArg):
				if self._dynamic_config.digraph.parent_nodes(node):
						node_type = "argument"
				dep_chain.append((_unicode_decode("%s") % (node,), node_type))
			elif node is not start_node:
				# Find the atom with which this node pulled in the
				# previously visited child.
				for ppkg, patom in all_parents[child]:
						atom = patom.unevaluated_atom
				for priority in self._dynamic_config.digraph.nodes[node][0][child]:
					if priority.buildtime:
						dep_strings.add(node.metadata["DEPEND"])
					if priority.runtime:
						dep_strings.add(node.metadata["RDEPEND"])
					if priority.runtime_post:
						dep_strings.add(node.metadata["PDEPEND"])
				affecting_use = set()
				for dep_str in dep_strings:
					affecting_use.update(extract_affecting_use(dep_str, atom))
				#Don't show flags as 'affecting' if the user can't change them,
				affecting_use.difference_update(node.use.mask, \
				pkg_name = _unicode_decode("%s") % (node.cpv,)
					for flag in affecting_use:
						if flag in self._pkg_use_enabled(node):
							usedep.append("-"+flag)
					pkg_name += "[%s]" % ",".join(usedep)
				dep_chain.append((pkg_name, node.type_name))
			if node not in self._dynamic_config.digraph:
				# The parent is not in the graph due to backtracking.
			# When traversing to parents, prefer arguments over packages
			# since arguments are root nodes. Never traverse the same
			# package twice, in order to prevent an infinite loop.
			selected_parent = None
			parent_unsatisfied = None
			for parent in self._dynamic_config.digraph.parent_nodes(node):
				if parent in traversed_nodes:
				if isinstance(parent, DependencyArg):
					if isinstance(parent, Package) and \
						parent.operation == "merge":
						parent_merge = parent
					if unsatisfied_dependency and node is start_node:
						# Make sure that pkg doesn't satisfy parent's dependency.
						# This ensures that we select the correct parent for use
						for ppkg, atom in all_parents[start_node]:
								atom_set = InternalPackageSet(initial_atoms=(atom,))
								if not atom_set.findAtomForPackage(start_node):
									parent_unsatisfied = parent
						selected_parent = parent
			if parent_unsatisfied is not None:
				selected_parent = parent_unsatisfied
			elif parent_merge is not None:
				# Prefer parent in the merge list (bug #354747).
				selected_parent = parent_merge
			elif parent_arg is not None:
				if self._dynamic_config.digraph.parent_nodes(parent_arg):
					selected_parent = parent_arg
						(_unicode_decode("%s") % (parent_arg,), "argument"))
					selected_parent = None
			node = selected_parent
	def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
		"""
		Render the dep chain of pkg as a single '#'-prefixed comment
		line, suitable for annotating generated config entries.
		"""
		dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
		for node, node_type in dep_chain:
			if node_type == "argument":
				display_list.append("required by %s (argument)" % node)
				display_list.append("required by %s" % node)
		msg = "#" + ", ".join(display_list) + "\n"
2734 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2735 check_backtrack=False):
2737 When check_backtrack=True, no output is produced and
2738 the method either returns or raises _backtrack_mask if
2739 a matching package has been masked by backtracking.
2741 backtrack_mask = False
2742 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
2744 xinfo = '"%s"' % atom.unevaluated_atom
2747 if isinstance(myparent, AtomArg):
2748 xinfo = _unicode_decode('"%s"') % (myparent,)
2749 # Discard null/ from failed cpv_expand category expansion.
2750 xinfo = xinfo.replace("null/", "")
2752 xinfo = "%s for %s" % (xinfo, root)
2753 masked_packages = []
2755 missing_use_adjustable = set()
2756 required_use_unsatisfied = []
2757 masked_pkg_instances = set()
2758 missing_licenses = []
2759 have_eapi_mask = False
2760 pkgsettings = self._frozen_config.pkgsettings[root]
2761 root_config = self._frozen_config.roots[root]
2762 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2763 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2764 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2765 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
2766 for db, pkg_type, built, installed, db_keys in dbs:
2770 if hasattr(db, "xmatch"):
2771 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2773 cpv_list = db.match(atom.without_use)
2775 if atom.repo is None and hasattr(db, "getRepositories"):
2776 repo_list = db.getRepositories()
2778 repo_list = [atom.repo]
2782 for cpv in cpv_list:
2783 for repo in repo_list:
2784 if not db.cpv_exists(cpv, myrepo=repo):
2787 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2788 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2790 if metadata is not None:
2792 repo = metadata.get('repository')
2793 pkg = self._pkg(cpv, pkg_type, root_config,
2794 installed=installed, myrepo=repo)
2795 if not atom_set.findAtomForPackage(pkg,
2796 modified_use=self._pkg_use_enabled(pkg)):
2798 # pkg.metadata contains calculated USE for ebuilds,
2799 # required later for getMissingLicenses.
2800 metadata = pkg.metadata
2801 if pkg in self._dynamic_config._runtime_pkg_mask:
2802 backtrack_reasons = \
2803 self._dynamic_config._runtime_pkg_mask[pkg]
2804 mreasons.append('backtracking: %s' % \
2805 ', '.join(sorted(backtrack_reasons)))
2806 backtrack_mask = True
2807 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
2808 modified_use=self._pkg_use_enabled(pkg)):
2809 mreasons = ["exclude option"]
2811 masked_pkg_instances.add(pkg)
2812 if atom.unevaluated_atom.use:
2814 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
2815 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
2816 missing_use.append(pkg)
2820 writemsg("violated_conditionals raised " + \
2821 "InvalidAtom: '%s' parent: %s" % \
2822 (atom, myparent), noiselevel=-1)
2824 if not mreasons and \
2826 pkg.metadata["REQUIRED_USE"] and \
2827 eapi_has_required_use(pkg.metadata["EAPI"]):
2828 if not check_required_use(
2829 pkg.metadata["REQUIRED_USE"],
2830 self._pkg_use_enabled(pkg),
2831 pkg.iuse.is_valid_flag):
2832 required_use_unsatisfied.append(pkg)
2834 root_slot = (pkg.root, pkg.slot_atom)
2835 if pkg.built and root_slot in self._rebuild.rebuild_list:
2836 mreasons = ["need to rebuild from source"]
2837 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
2838 mreasons = ["need to rebuild from source"]
2839 elif pkg.built and not mreasons:
2840 mreasons = ["use flag configuration mismatch"]
2841 masked_packages.append(
2842 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
2846 raise self._backtrack_mask()
2850 missing_use_reasons = []
2851 missing_iuse_reasons = []
2852 for pkg in missing_use:
2853 use = self._pkg_use_enabled(pkg)
2855 #Use the unevaluated atom here, because some flags might have gone
2856 #lost during evaluation.
2857 required_flags = atom.unevaluated_atom.use.required
2858 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
2862 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2863 missing_iuse_reasons.append((pkg, mreasons))
2865 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
2866 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
2868 untouchable_flags = \
2869 frozenset(chain(pkg.use.mask, pkg.use.force))
2870 if untouchable_flags.intersection(
2871 chain(need_enable, need_disable)):
2874 missing_use_adjustable.add(pkg)
2875 required_use = pkg.metadata["REQUIRED_USE"]
2876 required_use_warning = ""
2878 old_use = self._pkg_use_enabled(pkg)
2879 new_use = set(self._pkg_use_enabled(pkg))
2880 for flag in need_enable:
2882 for flag in need_disable:
2883 new_use.discard(flag)
2884 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
2885 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
2886 required_use_warning = ", this change violates use flag constraints " + \
2887 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
2889 if need_enable or need_disable:
2891 changes.extend(colorize("red", "+" + x) \
2892 for x in need_enable)
2893 changes.extend(colorize("blue", "-" + x) \
2894 for x in need_disable)
2895 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2896 missing_use_reasons.append((pkg, mreasons))
2898 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
2899 # Lets see if the violated use deps are conditional.
2900 # If so, suggest to change them on the parent.
2902 # If the child package is masked then a change to
2903 # parent USE is not a valid solution (a normal mask
2904 # message should be displayed instead).
2905 if pkg in masked_pkg_instances:
2909 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
2910 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
2911 if not (violated_atom.use.enabled or violated_atom.use.disabled):
2912 #all violated use deps are conditional
2914 conditional = violated_atom.use.conditional
2915 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
2916 conditional.enabled, conditional.disabled))
2918 untouchable_flags = \
2919 frozenset(chain(myparent.use.mask, myparent.use.force))
2920 if untouchable_flags.intersection(involved_flags):
2923 required_use = myparent.metadata["REQUIRED_USE"]
2924 required_use_warning = ""
2926 old_use = self._pkg_use_enabled(myparent)
2927 new_use = set(self._pkg_use_enabled(myparent))
2928 for flag in involved_flags:
2930 new_use.discard(flag)
2933 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
2934 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
2935 required_use_warning = ", this change violates use flag constraints " + \
2936 "defined by %s: '%s'" % (myparent.cpv, \
2937 human_readable_required_use(required_use))
2939 for flag in involved_flags:
2940 if flag in self._pkg_use_enabled(myparent):
2941 changes.append(colorize("blue", "-" + flag))
2943 changes.append(colorize("red", "+" + flag))
2944 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
2945 if (myparent, mreasons) not in missing_use_reasons:
2946 missing_use_reasons.append((myparent, mreasons))
2948 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2949 in missing_use_reasons if pkg not in masked_pkg_instances]
2951 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2952 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2954 show_missing_use = False
2955 if unmasked_use_reasons:
2956 # Only show the latest version.
2957 show_missing_use = []
2959 parent_reason = None
2960 for pkg, mreasons in unmasked_use_reasons:
2962 if parent_reason is None:
2963 #This happens if a use change on the parent
2964 #leads to a satisfied conditional use dep.
2965 parent_reason = (pkg, mreasons)
2966 elif pkg_reason is None:
2967 #Don't rely on the first pkg in unmasked_use_reasons,
2968 #being the highest version of the dependency.
2969 pkg_reason = (pkg, mreasons)
2971 show_missing_use.append(pkg_reason)
2973 show_missing_use.append(parent_reason)
2975 elif unmasked_iuse_reasons:
2976 masked_with_iuse = False
2977 for pkg in masked_pkg_instances:
2978 #Use atom.unevaluated here, because some flags might have gone
2979 #lost during evaluation.
2980 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
2981 # Package(s) with required IUSE are masked,
2982 # so display a normal masking message.
2983 masked_with_iuse = True
2985 if not masked_with_iuse:
2986 show_missing_use = unmasked_iuse_reasons
2988 if required_use_unsatisfied:
2989 # If there's a higher unmasked version in missing_use_adjustable
2990 # then we want to show that instead.
2991 for pkg in missing_use_adjustable:
2992 if pkg not in masked_pkg_instances and \
2993 pkg > required_use_unsatisfied[0]:
2994 required_use_unsatisfied = False
2999 if required_use_unsatisfied:
3000 # We have an unmasked package that only requires USE adjustment
3001 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3002 # that the user wants the latest version, so only the first
3003 # instance is displayed.
3004 pkg = required_use_unsatisfied[0]
3005 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3006 writemsg_stdout("\n!!! " + \
3007 colorize("BAD", "The ebuild selected to satisfy ") + \
3008 colorize("INFORM", xinfo) + \
3009 colorize("BAD", " has unmet requirements.") + "\n",
3011 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3012 writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
3014 writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
3015 "are unsatisfied:\n", noiselevel=-1)
3016 reduced_noise = check_required_use(
3017 pkg.metadata["REQUIRED_USE"],
3018 self._pkg_use_enabled(pkg),
3019 pkg.iuse.is_valid_flag).tounicode()
3020 writemsg_stdout(" %s\n" % \
3021 human_readable_required_use(reduced_noise),
3023 normalized_required_use = \
3024 " ".join(pkg.metadata["REQUIRED_USE"].split())
3025 if reduced_noise != normalized_required_use:
3026 writemsg_stdout("\n The above constraints " + \
3027 "are a subset of the following complete expression:\n",
3029 writemsg_stdout(" %s\n" % \
3030 human_readable_required_use(normalized_required_use),
3032 writemsg_stdout("\n", noiselevel=-1)
3034 elif show_missing_use:
3035 writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3036 writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3037 for pkg, mreasons in show_missing_use:
3038 writemsg_stdout("- "+pkg.cpv+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3040 elif masked_packages:
3041 writemsg_stdout("\n!!! " + \
3042 colorize("BAD", "All ebuilds that could satisfy ") + \
3043 colorize("INFORM", xinfo) + \
3044 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3045 writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3046 have_eapi_mask = show_masked_packages(masked_packages)
3048 writemsg_stdout("\n", noiselevel=-1)
3049 msg = ("The current version of portage supports " + \
3050 "EAPI '%s'. You must upgrade to a newer version" + \
3051 " of portage before EAPI masked packages can" + \
3052 " be installed.") % portage.const.EAPI
3053 writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3054 writemsg_stdout("\n", noiselevel=-1)
3058 if not atom.cp.startswith("null/"):
3059 for pkg in self._iter_match_pkgs_any(
3060 root_config, Atom(atom.cp)):
3064 writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3065 if isinstance(myparent, AtomArg) and \
3067 self._frozen_config.myopts.get(
3068 "--misspell-suggestions", "y") != "n":
3069 cp = myparent.atom.cp.lower()
3070 cat, pkg = portage.catsplit(cp)
3074 writemsg_stdout("\nemerge: searching for similar names..."
3078 all_cp.update(vardb.cp_all())
3079 all_cp.update(portdb.cp_all())
3080 if "--usepkg" in self._frozen_config.myopts:
3081 all_cp.update(bindb.cp_all())
3084 for cp_orig in all_cp:
3085 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3086 all_cp = set(orig_cp_map)
3089 matches = difflib.get_close_matches(cp, all_cp)
3092 for other_cp in all_cp:
3093 other_pkg = portage.catsplit(other_cp)[1]
3094 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3095 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3097 for pkg_match in pkg_matches:
3098 matches.extend(pkg_to_cp[pkg_match])
3100 matches_orig_case = []
3102 matches_orig_case.extend(orig_cp_map[cp])
3103 matches = matches_orig_case
3105 if len(matches) == 1:
3106 writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
3108 elif len(matches) > 1:
3110 "\nemerge: Maybe you meant any of these: %s?\n" % \
3111 (", ".join(matches),), noiselevel=-1)
3113 # Generally, this would only happen if
3114 # all dbapis are empty.
3115 writemsg_stdout(" nothing similar found.\n"
3118 if not isinstance(myparent, AtomArg):
3119 # It's redundant to show parent for AtomArg since
3120 # it's the same as 'xinfo' displayed above.
3121 dep_chain = self._get_dep_chain(myparent, atom)
3122 for node, node_type in dep_chain:
3123 msg.append('(dependency required by "%s" [%s])' % \
3124 (colorize('INFORM', _unicode_decode("%s") % \
3125 (node)), node_type))
3128 writemsg_stdout("\n".join(msg), noiselevel=-1)
3129 writemsg_stdout("\n", noiselevel=-1)
3133 writemsg_stdout("\n", noiselevel=-1)
3135 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
# Iterate over Package instances matching `atom` across every package
# database configured for this root (the "dbs" list in
# _filtered_trees), delegating the per-type matching to
# _iter_match_pkgs.
# NOTE(review): this excerpt omits interior lines — the statement that
# yields each matched pkg is not visible here; confirm against the
# full source.
3136 for db, pkg_type, built, installed, db_keys in \
3137 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3138 for pkg in self._iter_match_pkgs(root_config,
3139 pkg_type, atom, onlydeps=onlydeps):
3142 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
3144 Iterate over Package instances of pkg_type matching the given atom.
3145 This does not check visibility and it also does not match USE for
3146 unbuilt ebuilds since USE are lazily calculated after visibility
3147 checks (to avoid the expense when possible).
# Pick the dbapi (vartree/bintree/porttree) that corresponds to the
# requested package type.
3150 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3152 if hasattr(db, "xmatch"):
3153 # For portdbapi we match only against the cpv, in order
3154 # to bypass unnecessary cache access for things like IUSE
3155 # and SLOT. Later, we cache the metadata in a Package
3156 # instance, and use that for further matching. This
3157 # optimization is especially relevant since
3158 # portdbapi.aux_get() does not cache calls that have
3159 # myrepo or mytree arguments.
3160 cpv_list = db.xmatch("match-all-cpv-only", atom)
3162 cpv_list = db.match(atom)
3164 # USE=multislot can make an installed package appear as if
3165 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3166 # won't do any good as long as USE=multislot is enabled since
3167 # the newly built package still won't have the expected slot.
3168 # Therefore, assume that such SLOT dependencies are already
3169 # satisfied rather than forcing a rebuild.
3170 installed = pkg_type == 'installed'
3171 if installed and not cpv_list and atom.slot:
# The slotted atom matched nothing among installed packages; scan
# the slot-less matches and check whether any other db can provide
# the requested slot at all.
3172 for cpv in db.match(atom.cp):
3173 slot_available = False
3174 for other_db, other_type, other_built, \
3175 other_installed, other_keys in \
3176 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3179 other_db.aux_get(cpv, ["SLOT"])[0]:
3180 slot_available = True
3184 if not slot_available:
3186 inst_pkg = self._pkg(cpv, "installed",
3187 root_config, installed=installed, myrepo = atom.repo)
3188 # Remove the slot from the atom and verify that
3189 # the package matches the resulting atom.
3190 if portage.match_from_list(
3191 atom.without_slot, [inst_pkg]):
# Wrap the atom in a set so findAtomForPackage() can be used for
# per-package matching below (including PROVIDE handling).
3196 atom_set = InternalPackageSet(initial_atoms=(atom,),
3198 if atom.repo is None and hasattr(db, "getRepositories"):
3199 repo_list = db.getRepositories()
3201 repo_list = [atom.repo]
3205 for cpv in cpv_list:
3206 for repo in repo_list:
3209 pkg = self._pkg(cpv, pkg_type, root_config,
3210 installed=installed, onlydeps=onlydeps, myrepo=repo)
# A cpv may simply not exist in this particular repo;
# skip it and try the next repo.
3211 except portage.exception.PackageNotFound:
3214 # A cpv can be returned from dbapi.match() as an
3215 # old-style virtual match even in cases when the
3216 # package does not actually PROVIDE the virtual.
3217 # Filter out any such false matches here.
3219 # Make sure that cpv from the current repo satisfies the atom.
3220 # This might not be the case if there are several repos with
3221 # the same cpv, but different metadata keys, like SLOT.
3222 # Also, for portdbapi, parts of the match that require
3223 # metadata access are deferred until we have cached the
3224 # metadata in a Package instance.
3225 if not atom_set.findAtomForPackage(pkg,
3226 modified_use=self._pkg_use_enabled(pkg)):
3230 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing wrapper around _select_pkg_highest_available_imp: results
# are cached per (root, atom, onlydeps) in _highest_pkg_cache.
3231 cache_key = (root, atom, onlydeps)
3232 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
# Cache hit path: if a cached pkg has since been added to the
# dependency graph, refresh the cached "existing" node so callers
# see that it is already in the graph.
3235 if pkg and not existing:
3236 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3237 if existing and existing == pkg:
3238 # Update the cache to reflect that the
3239 # package has been added to the graph.
3241 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache miss: compute the selection and store it.
3243 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3244 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Record the selected package in _visible_pkgs when it passes the
# visibility check and is not an installed-but-masked instance.
3247 settings = pkg.root_config.settings
3248 if self._pkg_visibility_check(pkg) and \
3249 not (pkg.installed and pkg.masks):
3250 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
3253 def _want_installed_pkg(self, pkg):
3255 Given an installed package returned from select_pkg, return
3256 True if the user has not explicitly requested for this package
3257 to be replaced (typically via an atom on the command line).
# Outside of --selective mode, a package on the target root that
# matches one of the user's argument atoms has been explicitly
# requested, so keeping the installed instance is not wanted.
3259 if "selective" not in self._dynamic_config.myparams and \
3260 pkg.root == self._frozen_config.target_root:
# EAFP: probe for at least one matching argument atom; StopIteration
# means no argument atom matches this package.
3262 next(self._iter_atoms_for_pkg(pkg))
3263 except StopIteration:
# Treat unparseable dependency metadata as "no match" rather than
# propagating the error from this predicate.
3265 except portage.exception.InvalidDependString:
3271 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# First try a normal selection with no configuration changes allowed.
3272 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
# Remember the unmodified selection so we can fall back to it if the
# autounmask retries below do not produce a usable result.
3274 default_selection = (pkg, existing)
3276 if self._dynamic_config._autounmask is True:
3277 if pkg is not None and \
3279 not self._want_installed_pkg(pkg):
# Autounmask escalation: first try USE changes only, then also
# allow keyword/license changes, and within each stage try
# without and then with package.unmask changes. The
# (only_use_changes and allow_unmasks) combination is skipped
# as redundant.
3282 for only_use_changes in True, False:
3286 for allow_unmasks in (False, True):
3287 if only_use_changes and allow_unmasks:
3294 self._wrapped_select_pkg_highest_available_imp(
3295 root, atom, onlydeps=onlydeps,
3296 allow_use_changes=True,
3297 allow_unstable_keywords=(not only_use_changes),
3298 allow_license_changes=(not only_use_changes),
3299 allow_unmasks=allow_unmasks)
3301 if pkg is not None and \
3303 not self._want_installed_pkg(pkg):
3306 if self._dynamic_config._need_restart:
3310 # This ensures that we can fall back to an installed package
3311 # that may have been rejected in the autounmask path above.
3312 return default_selection
3314 return pkg, existing
3316 def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
# Check whether pkg is visible, optionally scheduling autounmask
# configuration changes (keywords / package.unmask / licenses) that
# would make it visible. The allow_* flags control which kinds of
# changes may be proposed. Scheduled changes are recorded both in
# _dynamic_config and in _backtrack_infos for the backtracker.
# Without autounmask there is nothing further to do here.
3321 if not self._dynamic_config._autounmask:
3324 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3325 root_config = self._frozen_config.roots[pkg.root]
3326 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each masking reason by its unmask hint.
3328 masked_by_unstable_keywords = False
3329 masked_by_missing_keywords = False
3330 missing_licenses = None
3331 masked_by_something_else = False
3332 masked_by_p_mask = False
3334 for reason in mreasons:
3335 hint = reason.unmask_hint
3338 masked_by_something_else = True
3339 elif hint.key == "unstable keyword":
3340 masked_by_unstable_keywords = True
3341 if hint.value == "**":
# "**" means no keyword for this arch at all.
3342 masked_by_missing_keywords = True
3343 elif hint.key == "p_mask":
3344 masked_by_p_mask = True
3345 elif hint.key == "license":
3346 missing_licenses = hint.value
3348 masked_by_something_else = True
# A mask we cannot lift via configuration changes makes the
# package unconditionally invisible.
3350 if masked_by_something_else:
3353 if pkg in self._dynamic_config._needed_unstable_keywords:
3354 #If the package is already keyworded, remove the mask.
3355 masked_by_unstable_keywords = False
3356 masked_by_missing_keywords = False
3358 if pkg in self._dynamic_config._needed_p_mask_changes:
3359 #If a package.unmask change is already scheduled for this
3360 #package, remove the p_mask flag.
3360 masked_by_p_mask = False
3362 if missing_licenses:
3363 #If the needed licenses are already unmasked, remove the mask.
3364 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
3366 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
3367 #Package has already been unmasked.
3370 #We treat missing keywords in the same way as masks.
3371 if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
3372 (masked_by_missing_keywords and not allow_unmasks) or \
3373 (masked_by_p_mask and not allow_unmasks) or \
3374 (missing_licenses and not allow_license_changes):
3375 #We are not allowed to do the needed changes.
# Schedule the allowed changes, mirroring each one into
# _backtrack_infos["config"] so backtracking can replay them.
3378 if masked_by_unstable_keywords:
3379 self._dynamic_config._needed_unstable_keywords.add(pkg)
3380 backtrack_infos = self._dynamic_config._backtrack_infos
3381 backtrack_infos.setdefault("config", {})
3382 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
3383 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
3385 if masked_by_p_mask:
3386 self._dynamic_config._needed_p_mask_changes.add(pkg)
3387 backtrack_infos = self._dynamic_config._backtrack_infos
3388 backtrack_infos.setdefault("config", {})
3389 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
3390 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
3392 if missing_licenses:
3393 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
3394 backtrack_infos = self._dynamic_config._backtrack_infos
3395 backtrack_infos.setdefault("config", {})
3396 backtrack_infos["config"].setdefault("needed_license_changes", set())
3397 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
3401 def _pkg_use_enabled(self, pkg, target_use=None):
3403 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
3404 If target_use is given, the needed changes are computed to make the package useable.
3405 Example: target_use = { "foo": True, "bar": False }
3406 The flags target_use must be in the pkg's IUSE.
3408 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
# Query-only mode: report the effective USE, including any previously
# recorded autounmask USE changes for this package.
3410 if target_use is None:
3411 if needed_use_config_change is None:
3412 return pkg.use.enabled
3414 return needed_use_config_change[0]
# Change-computation mode: start from the previously adjusted USE and
# change-map if one exists, otherwise from the package's own USE.
3416 if needed_use_config_change is not None:
3417 old_use = needed_use_config_change[0]
3419 old_changes = needed_use_config_change[1]
3420 new_changes = old_changes.copy()
3422 old_use = pkg.use.enabled
# Record the per-flag changes required to reach target_use.
# NOTE(review): the initialization of `new_use` (used below at 3439)
# is not visible in this excerpt — confirm against the full source.
3427 for flag, state in target_use.items():
3429 if flag not in old_use:
3430 if new_changes.get(flag) == False:
3432 new_changes[flag] = True
3436 if new_changes.get(flag) == True:
3438 new_changes[flag] = False
# Flags not mentioned in target_use keep their previous state.
3439 new_use.update(old_use.difference(target_use))
3441 def want_restart_for_use_change(pkg, new_use):
# Decide whether an already-graphed package's USE change invalidates
# work done so far: a restart is needed if the change alters the
# use_reduce result of any dependency-related metadata, or if a
# parent atom's USE dependencies reference one of the changed flags.
3442 if pkg not in self._dynamic_config.digraph.nodes:
3445 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
3446 dep = pkg.metadata[key]
3447 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3448 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3450 if old_val != new_val:
3453 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3454 if not parent_atoms:
3457 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
3458 for ppkg, atom in parent_atoms:
3459 if not atom.use or \
3460 not atom.use.required.intersection(changes):
3467 if new_changes != old_changes:
3468 #Don't do the change if it violates REQUIRED_USE.
3469 required_use = pkg.metadata["REQUIRED_USE"]
3470 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3471 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
# Changes to masked or forced flags cannot take effect, so refuse
# to record them.
3474 if pkg.use.mask.intersection(new_changes) or \
3475 pkg.use.force.intersection(new_changes):
# Persist the new USE state and mirror it into the backtracker's
# config info; trigger a restart when the change affects the graph.
3478 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
3479 backtrack_infos = self._dynamic_config._backtrack_infos
3480 backtrack_infos.setdefault("config", {})
3481 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
3482 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
3483 if want_restart_for_use_change(pkg, new_use):
3484 self._dynamic_config._need_restart = True
3487 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
3488 allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
3489 root_config = self._frozen_config.roots[root]
3490 pkgsettings = self._frozen_config.pkgsettings[root]
3491 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3492 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3493 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
3494 # List of acceptable packages, ordered by type preference.
3495 matched_packages = []
3496 matched_pkgs_ignore_use = []
3497 highest_version = None
3498 if not isinstance(atom, portage.dep.Atom):
3499 atom = portage.dep.Atom(atom)
3501 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
3502 existing_node = None
3504 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3505 usepkg = "--usepkg" in self._frozen_config.myopts
3506 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3507 empty = "empty" in self._dynamic_config.myparams
3508 selective = "selective" in self._dynamic_config.myparams
3510 noreplace = "--noreplace" in self._frozen_config.myopts
3511 avoid_update = "--update" not in self._frozen_config.myopts
3512 dont_miss_updates = "--update" in self._frozen_config.myopts
3513 use_ebuild_visibility = self._frozen_config.myopts.get(
3514 '--use-ebuild-visibility', 'n') != 'n'
3515 reinstall_atoms = self._frozen_config.reinstall_atoms
3516 usepkg_exclude = self._frozen_config.usepkg_exclude
3517 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
3519 # Behavior of the "selective" parameter depends on
3520 # whether or not a package matches an argument atom.
3521 # If an installed package provides an old-style
3522 # virtual that is no longer provided by an available
3523 # package, the installed package may match an argument
3524 # atom even though none of the available packages do.
3525 # Therefore, "selective" logic does not consider
3526 # whether or not an installed package matches an
3527 # argument atom. It only considers whether or not
3528 # available packages match argument atoms, which is
3529 # represented by the found_available_arg flag.
3530 found_available_arg = False
3531 packages_with_invalid_use_config = []
3532 for find_existing_node in True, False:
3535 for db, pkg_type, built, installed, db_keys in dbs:
3538 if installed and not find_existing_node:
3539 want_reinstall = reinstall or empty or \
3540 (found_available_arg and not selective)
3541 if want_reinstall and matched_packages:
3544 # Ignore USE deps for the initial match since we want to
3545 # ensure that updates aren't missed solely due to the user's
3546 # USE configuration.
3547 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3549 if pkg in self._dynamic_config._runtime_pkg_mask:
3550 # The package has been masked by the backtracking logic
3552 root_slot = (pkg.root, pkg.slot_atom)
3553 if pkg.built and root_slot in self._rebuild.rebuild_list:
3555 if (pkg.installed and
3556 root_slot in self._rebuild.reinstall_list):
3559 if not pkg.installed and \
3560 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3561 modified_use=self._pkg_use_enabled(pkg)):
3564 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
3565 modified_use=self._pkg_use_enabled(pkg)):
3568 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
3569 modified_use=self._pkg_use_enabled(pkg))
3571 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
3572 (not pkg.installed or dont_miss_updates):
3573 # Check if a higher version was rejected due to user
3574 # USE configuration. The packages_with_invalid_use_config
3575 # list only contains unbuilt ebuilds since USE can't
3576 # be changed for built packages.
3577 higher_version_rejected = False
3578 repo_priority = pkg.repo_priority
3579 for rejected in packages_with_invalid_use_config:
3580 if rejected.cp != pkg.cp:
3583 higher_version_rejected = True
3585 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
3586 # If version is identical then compare
3587 # repo priority (see bug #350254).
3588 rej_repo_priority = rejected.repo_priority
3589 if rej_repo_priority is not None and \
3590 (repo_priority is None or
3591 rej_repo_priority > repo_priority):
3592 higher_version_rejected = True
3594 if higher_version_rejected:
3598 # Make --noreplace take precedence over --newuse.
3599 if not pkg.installed and noreplace and \
3600 cpv in vardb.match(atom):
3601 inst_pkg = self._pkg(pkg.cpv, "installed",
3602 root_config, installed=True)
3603 if inst_pkg.visible:
3604 # If the installed version is masked, it may
3605 # be necessary to look at lower versions,
3606 # in case there is a visible downgrade.
3608 reinstall_for_flags = None
3610 if not pkg.installed or \
3611 (matched_packages and not avoid_update):
3612 # Only enforce visibility on installed packages
3613 # if there is at least one other visible package
3614 # available. By filtering installed masked packages
3615 # here, packages that have been masked since they
3616 # were installed can be automatically downgraded
3617 # to an unmasked version. NOTE: This code needs to
3618 # be consistent with masking behavior inside
3619 # _dep_check_composite_db, in order to prevent
3620 # incorrect choices in || deps like bug #351828.
3622 if not self._pkg_visibility_check(pkg, \
3623 allow_unstable_keywords=allow_unstable_keywords,
3624 allow_license_changes=allow_license_changes,
3625 allow_unmasks=allow_unmasks):
3628 # Enable upgrade or downgrade to a version
3629 # with visible KEYWORDS when the installed
3630 # version is masked by KEYWORDS, but never
3631 # reinstall the same exact version only due
3632 # to a KEYWORDS mask. See bug #252167.
3634 if pkg.type_name != "ebuild" and matched_packages:
3635 # Don't re-install a binary package that is
3636 # identical to the currently installed package
3637 # (see bug #354441).
3638 identical_binary = False
3639 if usepkg and pkg.installed:
3640 for selected_pkg in matched_packages:
3641 if selected_pkg.type_name == "binary" and \
3642 selected_pkg.cpv == pkg.cpv and \
3643 selected_pkg.metadata.get('BUILD_TIME') == \
3644 pkg.metadata.get('BUILD_TIME'):
3645 identical_binary = True
3648 if not identical_binary:
3649 # If the ebuild no longer exists or it's
3650 # keywords have been dropped, reject built
3651 # instances (installed or binary).
3652 # If --usepkgonly is enabled, assume that
3653 # the ebuild status should be ignored.
3654 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
3655 if pkg.installed and pkg.masks:
3660 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
3661 except portage.exception.PackageNotFound:
3662 pkg_eb_visible = False
3663 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3664 "ebuild", Atom("=%s" % (pkg.cpv,))):
3665 if self._pkg_visibility_check(pkg_eb, \
3666 allow_unstable_keywords=allow_unstable_keywords,
3667 allow_license_changes=allow_license_changes,
3668 allow_unmasks=allow_unmasks):
3669 pkg_eb_visible = True
3671 if not pkg_eb_visible:
3674 if not self._pkg_visibility_check(pkg_eb, \
3675 allow_unstable_keywords=allow_unstable_keywords,
3676 allow_license_changes=allow_license_changes,
3677 allow_unmasks=allow_unmasks):
3680 # Calculation of USE for unbuilt ebuilds is relatively
3681 # expensive, so it is only performed lazily, after the
3682 # above visibility checks are complete.
3685 if root == self._frozen_config.target_root:
3687 myarg = next(self._iter_atoms_for_pkg(pkg))
3688 except StopIteration:
3690 except portage.exception.InvalidDependString:
3692 # masked by corruption
3694 if not installed and myarg:
3695 found_available_arg = True
3697 if atom.unevaluated_atom.use:
3698 #Make sure we don't miss a 'missing IUSE'.
3699 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3700 # Don't add this to packages_with_invalid_use_config
3701 # since IUSE cannot be adjusted by the user.
3706 matched_pkgs_ignore_use.append(pkg)
3707 if allow_use_changes:
3709 for flag in atom.use.enabled:
3710 target_use[flag] = True
3711 for flag in atom.use.disabled:
3712 target_use[flag] = False
3713 use = self._pkg_use_enabled(pkg, target_use)
3715 use = self._pkg_use_enabled(pkg)
3718 can_adjust_use = not pkg.built
3719 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
3720 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
3722 if atom.use.enabled:
3723 if atom.use.enabled.intersection(missing_disabled):
3725 can_adjust_use = False
3726 need_enabled = atom.use.enabled.difference(use)
3728 need_enabled = need_enabled.difference(missing_enabled)
3732 if pkg.use.mask.intersection(need_enabled):
3733 can_adjust_use = False
3735 if atom.use.disabled:
3736 if atom.use.disabled.intersection(missing_enabled):
3738 can_adjust_use = False
3739 need_disabled = atom.use.disabled.intersection(use)
3741 need_disabled = need_disabled.difference(missing_disabled)
3745 if pkg.use.force.difference(
3746 pkg.use.mask).intersection(need_disabled):
3747 can_adjust_use = False
3751 # Above we must ensure that this package has
3752 # absolutely no use.force, use.mask, or IUSE
3753 # issues that the user typically can't make
3754 # adjustments to solve (see bug #345979).
3755 # FIXME: Conditional USE deps complicate
3756 # issues. This code currently excludes cases
3757 # in which the user can adjust the parent
3758 # package's USE in order to satisfy the dep.
3759 packages_with_invalid_use_config.append(pkg)
3762 if pkg.cp == atom_cp:
3763 if highest_version is None:
3764 highest_version = pkg
3765 elif pkg > highest_version:
3766 highest_version = pkg
3767 # At this point, we've found the highest visible
3768 # match from the current repo. Any lower versions
3769 # from this repo are ignored, so this so the loop
3770 # will always end with a break statement below
3772 if find_existing_node:
3773 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3776 # Use PackageSet.findAtomForPackage()
3777 # for PROVIDE support.
3778 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
3779 if highest_version and \
3780 e_pkg.cp == atom_cp and \
3781 e_pkg < highest_version and \
3782 e_pkg.slot_atom != highest_version.slot_atom:
3783 # There is a higher version available in a
3784 # different slot, so this existing node is
3788 matched_packages.append(e_pkg)
3789 existing_node = e_pkg
3791 # Compare built package to current config and
3792 # reject the built package if necessary.
3793 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
3794 ("--newuse" in self._frozen_config.myopts or \
3795 "--reinstall" in self._frozen_config.myopts or \
3796 "--binpkg-respect-use" in self._frozen_config.myopts):
3797 iuses = pkg.iuse.all
3798 old_use = self._pkg_use_enabled(pkg)
3800 pkgsettings.setcpv(myeb)
3802 pkgsettings.setcpv(pkg)
3803 now_use = pkgsettings["PORTAGE_USE"].split()
3804 forced_flags = set()
3805 forced_flags.update(pkgsettings.useforce)
3806 forced_flags.update(pkgsettings.usemask)
3808 if myeb and not usepkgonly and not useoldpkg:
3809 cur_iuse = myeb.iuse.all
3810 if self._reinstall_for_flags(forced_flags,
3814 # Compare current config to installed package
3815 # and do not reinstall if possible.
3816 if not installed and not useoldpkg and \
3817 ("--newuse" in self._frozen_config.myopts or \
3818 "--reinstall" in self._frozen_config.myopts) and \
3819 cpv in vardb.match(atom):
3820 forced_flags = set()
3821 forced_flags.update(pkg.use.force)
3822 forced_flags.update(pkg.use.mask)
3823 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
3824 old_use = inst_pkg.use.enabled
3825 old_iuse = inst_pkg.iuse.all
3826 cur_use = self._pkg_use_enabled(pkg)
3827 cur_iuse = pkg.iuse.all
3828 reinstall_for_flags = \
3829 self._reinstall_for_flags(
3830 forced_flags, old_use, old_iuse,
3832 if reinstall_for_flags:
3834 if reinstall_atoms.findAtomForPackage(pkg, \
3835 modified_use=self._pkg_use_enabled(pkg)):
3840 matched_oldpkg.append(pkg)
3841 matched_packages.append(pkg)
3842 if reinstall_for_flags:
3843 self._dynamic_config._reinstall_nodes[pkg] = \
3847 if not matched_packages:
3850 if "--debug" in self._frozen_config.myopts:
3851 for pkg in matched_packages:
3852 portage.writemsg("%s %s\n" % \
3853 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3855 # Filter out any old-style virtual matches if they are
3856 # mixed with new-style virtual matches.
3858 if len(matched_packages) > 1 and \
3859 "virtual" == portage.catsplit(cp)[0]:
3860 for pkg in matched_packages:
3863 # Got a new-style virtual, so filter
3864 # out any old-style virtuals.
3865 matched_packages = [pkg for pkg in matched_packages \
3869 if existing_node is not None and \
3870 existing_node in matched_packages:
3871 return existing_node, existing_node
3873 if len(matched_packages) > 1:
3874 if rebuilt_binaries:
3877 for pkg in matched_packages:
3882 if built_pkg is not None and inst_pkg is not None:
3883 # Only reinstall if binary package BUILD_TIME is
3884 # non-empty, in order to avoid cases like to
3885 # bug #306659 where BUILD_TIME fields are missing
3886 # in local and/or remote Packages file.
3888 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
3889 except (KeyError, ValueError):
3893 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
3894 except (KeyError, ValueError):
3895 installed_timestamp = 0
3897 if "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
3898 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
3899 if built_timestamp and \
3900 built_timestamp > installed_timestamp and \
3901 built_timestamp >= minimal_timestamp:
3902 return built_pkg, existing_node
3904 #Don't care if the binary has an older BUILD_TIME than the installed
3905 #package. This is for closely tracking a binhost.
3906 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
3908 if built_timestamp and \
3909 built_timestamp != installed_timestamp:
3910 return built_pkg, existing_node
3912 for pkg in matched_packages:
3913 if pkg.installed and pkg.invalid:
3914 matched_packages = [x for x in \
3915 matched_packages if x is not pkg]
3918 for pkg in matched_packages:
3919 if pkg.installed and self._pkg_visibility_check(pkg, \
3920 allow_unstable_keywords=allow_unstable_keywords,
3921 allow_license_changes=allow_license_changes,
3922 allow_unmasks=allow_unmasks):
3923 return pkg, existing_node
3925 visible_matches = []
3927 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
3928 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3929 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
3930 if not visible_matches:
3931 visible_matches = [pkg.cpv for pkg in matched_packages \
3932 if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
3933 allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
3935 bestmatch = portage.best(visible_matches)
3937 # all are masked, so ignore visibility
3938 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
3939 matched_packages = [pkg for pkg in matched_packages \
3940 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3942 # ordered by type preference ("ebuild" type is the last resort)
3943 return matched_packages[-1], existing_node
3945 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3947 Select packages that have already been added to the graph or
3948 those that are installed and have not been scheduled for
3951 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
3952 matches = graph_db.match_pkgs(atom)
3955 pkg = matches[-1] # highest match
3956 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3957 return pkg, in_graph
3959 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
3961 Select packages that are installed.
3963 vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
3964 matches = vardb.match_pkgs(atom)
3967 if len(matches) > 1:
3968 unmasked = [pkg for pkg in matches if \
3969 self._pkg_visibility_check(pkg)]
3971 if len(unmasked) == 1:
3974 # Account for packages with masks (like KEYWORDS masks)
3975 # that are usually ignored in visibility checks for
3976 # installed packages, in order to handle cases like
3978 unmasked = [pkg for pkg in matches if not pkg.masks]
3981 pkg = matches[-1] # highest match
3982 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3983 return pkg, in_graph
3985 def _complete_graph(self, required_sets=None):
3987 Add any deep dependencies of required sets (args, system, world) that
3988 have not been pulled into the graph yet. This ensures that the graph
3989 is consistent such that initially satisfied deep dependencies are not
3990 broken in the new graph. Initially unsatisfied dependencies are
3991 irrelevant since we only want to avoid breaking dependencies that are
3992 initially satisfied.
3994 Since this method can consume enough time to disturb users, it is
3995 currently only enabled by the --complete-graph option.
3997 @param required_sets: contains required sets (currently only used
3998 for depclean and prune removal operations)
3999 @type required_sets: dict
4001 if "--buildpkgonly" in self._frozen_config.myopts or \
4002 "recurse" not in self._dynamic_config.myparams:
4005 if "complete" not in self._dynamic_config.myparams:
4006 # Automatically enable complete mode if there are any
4007 # downgrades, since they often break dependencies
4008 # (like in bug #353613).
4009 have_downgrade = False
4010 for node in self._dynamic_config.digraph:
4011 if not isinstance(node, Package) or \
4012 node.operation != "merge":
4014 vardb = self._frozen_config.roots[
4015 node.root].trees["vartree"].dbapi
4016 inst_pkg = vardb.match_pkgs(node.slot_atom)
4017 if inst_pkg and inst_pkg[0] > node:
4018 have_downgrade = True
4022 self._dynamic_config.myparams["complete"] = True
4024 # Skip complete graph mode, in order to avoid consuming
4025 # enough time to disturb users.
4030 # Put the depgraph into a mode that causes it to only
4031 # select packages that have already been added to the
4032 # graph or those that are installed and have not been
4033 # scheduled for replacement. Also, toggle the "deep"
4034 # parameter so that all dependencies are traversed and
4036 self._select_atoms = self._select_atoms_from_graph
4037 if "remove" in self._dynamic_config.myparams:
4038 self._select_package = self._select_pkg_from_installed
4040 self._select_package = self._select_pkg_from_graph
4041 self._dynamic_config._traverse_ignored_deps = True
4042 already_deep = self._dynamic_config.myparams.get("deep") is True
4043 if not already_deep:
4044 self._dynamic_config.myparams["deep"] = True
4046 # Invalidate the package selection cache, since
4047 # _select_package has just changed implementations.
4048 for trees in self._dynamic_config._filtered_trees.values():
4049 trees["porttree"].dbapi._clear_cache()
4051 args = self._dynamic_config._initial_arg_list[:]
4052 for root in self._frozen_config.roots:
4053 if root != self._frozen_config.target_root and \
4054 "remove" in self._dynamic_config.myparams:
4055 # Only pull in deps for the relevant root.
4057 depgraph_sets = self._dynamic_config.sets[root]
4058 required_set_names = self._frozen_config._required_set_names.copy()
4059 remaining_args = required_set_names.copy()
4060 if required_sets is None or root not in required_sets:
4063 # Removal actions may override sets with temporary
4064 # replacements that have had atoms removed in order
4065 # to implement --deselect behavior.
4066 required_set_names = set(required_sets[root])
4067 depgraph_sets.sets.clear()
4068 depgraph_sets.sets.update(required_sets[root])
4069 if "remove" not in self._dynamic_config.myparams and \
4070 root == self._frozen_config.target_root and \
4072 remaining_args.difference_update(depgraph_sets.sets)
4073 if not remaining_args and \
4074 not self._dynamic_config._ignored_deps and \
4075 not self._dynamic_config._dep_stack:
4077 root_config = self._frozen_config.roots[root]
4078 for s in required_set_names:
4079 pset = depgraph_sets.sets.get(s)
4081 pset = root_config.sets[s]
4082 atom = SETPREFIX + s
4083 args.append(SetArg(arg=atom, pset=pset,
4084 root_config=root_config))
4086 self._set_args(args)
4087 for arg in self._expand_set_args(args, add_to_digraph=True):
4088 for atom in arg.pset.getAtoms():
4089 self._dynamic_config._dep_stack.append(
4090 Dependency(atom=atom, root=arg.root_config.root,
4094 if self._dynamic_config._ignored_deps:
4095 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4096 self._dynamic_config._ignored_deps = []
4097 if not self._create_graph(allow_unsatisfied=True):
4099 # Check the unsatisfied deps to see if any initially satisfied deps
4100 # will become unsatisfied due to an upgrade. Initially unsatisfied
4101 # deps are irrelevant since we only want to avoid breaking deps
4102 # that are initially satisfied.
4103 while self._dynamic_config._unsatisfied_deps:
4104 dep = self._dynamic_config._unsatisfied_deps.pop()
4105 vardb = self._frozen_config.roots[
4106 dep.root].trees["vartree"].dbapi
4107 matches = vardb.match_pkgs(dep.atom)
4109 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4111 # A scheduled installation broke a deep dependency.
4112 # Add the installed package to the graph so that it
4113 # will be appropriately reported as a slot collision
4114 # (possibly solvable via backtracking).
4115 pkg = matches[-1] # highest match
4116 if not self._add_pkg(pkg, dep):
4118 if not self._create_graph(allow_unsatisfied=True):
4122 def _pkg(self, cpv, type_name, root_config, installed=False,
4123 onlydeps=False, myrepo = None):
4125 Get a package instance from the cache, or create a new
4126 one if necessary. Raises PackageNotFound from aux_get if it
4127 failures for some reason (package does not exist or is
4130 if type_name != "ebuild":
4131 # For installed (and binary) packages we don't care for the repo
4132 # when it comes to hashing, because there can only be one cpv.
4133 # So overwrite the repo_key with type_name.
4134 repo_key = type_name
4136 elif myrepo is None:
4137 raise AssertionError(
4138 "depgraph._pkg() called without 'myrepo' argument")
4143 if installed or onlydeps:
4144 operation = "nomerge"
4145 # Ensure that we use the specially optimized RootConfig instance
4146 # that refers to FakeVartree instead of the real vartree.
4147 root_config = self._frozen_config.roots[root_config.root]
4148 pkg = self._frozen_config._pkg_cache.get(
4149 (type_name, root_config.root, cpv, operation, repo_key))
4150 if pkg is None and onlydeps and not installed:
4151 # Maybe it already got pulled in as a "merge" node.
4152 pkg = self._dynamic_config.mydbapi[root_config.root].get(
4153 (type_name, root_config.root, cpv, 'merge', repo_key))
4156 tree_type = self.pkg_tree_map[type_name]
4157 db = root_config.trees[tree_type].dbapi
4158 db_keys = list(self._frozen_config._trees_orig[root_config.root][
4159 tree_type].dbapi._aux_cache_keys)
4162 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
4164 raise portage.exception.PackageNotFound(cpv)
4166 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
4167 installed=installed, metadata=metadata, onlydeps=onlydeps,
4168 root_config=root_config, type_name=type_name)
4170 self._frozen_config._pkg_cache[pkg] = pkg
4172 if not self._pkg_visibility_check(pkg) and \
4173 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
4174 slot_key = (pkg.root, pkg.slot_atom)
4175 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
4176 if other_pkg is None or pkg > other_pkg:
4177 self._frozen_config._highest_license_masked[slot_key] = pkg
4181 def _validate_blockers(self):
4182 """Remove any blockers from the digraph that do not match any of the
4183 packages within the graph. If necessary, create hard deps to ensure
4184 correct merge order such that mutually blocking packages are never
4185 installed simultaneously."""
4187 if "--buildpkgonly" in self._frozen_config.myopts or \
4188 "--nodeps" in self._frozen_config.myopts:
4191 complete = "complete" in self._dynamic_config.myparams
4192 deep = "deep" in self._dynamic_config.myparams
4195 # Pull in blockers from all installed packages that haven't already
4196 # been pulled into the depgraph. This is not enabled by default
4197 # due to the performance penalty that is incurred by all the
4198 # additional dep_check calls that are required.
4200 # For installed packages, always ignore blockers from DEPEND since
4201 # only runtime dependencies should be relevant for packages that
4202 # are already built.
4203 dep_keys = ["RDEPEND", "PDEPEND"]
4204 for myroot in self._frozen_config.trees:
4205 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4206 portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
4207 pkgsettings = self._frozen_config.pkgsettings[myroot]
4208 root_config = self._frozen_config.roots[myroot]
4209 dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
4210 final_db = self._dynamic_config.mydbapi[myroot]
4212 blocker_cache = BlockerCache(myroot, vardb)
4213 stale_cache = set(blocker_cache)
4216 stale_cache.discard(cpv)
4217 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4219 pkg in self._dynamic_config._traversed_pkg_deps
4221 # Check for masked installed packages. Only warn about
4222 # packages that are in the graph in order to avoid warning
4223 # about those that will be automatically uninstalled during
4224 # the merge process or by --depclean. Always warn about
4225 # packages masked by license, since the user likely wants
4226 # to adjust ACCEPT_LICENSE.
4228 if not self._pkg_visibility_check(pkg) and \
4229 (pkg_in_graph or 'LICENSE' in pkg.masks):
4230 self._dynamic_config._masked_installed.add(pkg)
4232 self._check_masks(pkg)
4234 blocker_atoms = None
4240 self._dynamic_config._blocker_parents.child_nodes(pkg))
4245 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4249 # Select just the runtime blockers.
4250 blockers = [blocker for blocker in blockers \
4251 if blocker.priority.runtime or \
4252 blocker.priority.runtime_post]
4253 if blockers is not None:
4254 blockers = set(blocker.atom for blocker in blockers)
4256 # If this node has any blockers, create a "nomerge"
4257 # node for it so that they can be enforced.
4258 self._spinner_update()
4259 blocker_data = blocker_cache.get(cpv)
4260 if blocker_data is not None and \
4261 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4264 # If blocker data from the graph is available, use
4265 # it to validate the cache and update the cache if
4267 if blocker_data is not None and \
4268 blockers is not None:
4269 if not blockers.symmetric_difference(
4270 blocker_data.atoms):
4274 if blocker_data is None and \
4275 blockers is not None:
4276 # Re-use the blockers from the graph.
4277 blocker_atoms = sorted(blockers)
4278 counter = long(pkg.metadata["COUNTER"])
4280 blocker_cache.BlockerData(counter, blocker_atoms)
4281 blocker_cache[pkg.cpv] = blocker_data
4285 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
4287 # Use aux_get() to trigger FakeVartree global
4288 # updates on *DEPEND when appropriate.
4289 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4290 # It is crucial to pass in final_db here in order to
4291 # optimize dep_check calls by eliminating atoms via
4292 # dep_wordreduce and dep_eval calls.
4294 success, atoms = portage.dep_check(depstr,
4295 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4296 trees=self._dynamic_config._graph_trees, myroot=myroot)
4299 except Exception as e:
4300 # This is helpful, for example, if a ValueError
4301 # is thrown from cpv_expand due to multiple
4302 # matches (this can happen if an atom lacks a
4304 show_invalid_depstring_notice(
4305 pkg, depstr, str(e))
4309 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4310 if replacement_pkg and \
4311 replacement_pkg[0].operation == "merge":
4312 # This package is being replaced anyway, so
4313 # ignore invalid dependencies so as not to
4314 # annoy the user too much (otherwise they'd be
4315 # forced to manually unmerge it first).
4317 show_invalid_depstring_notice(pkg, depstr, atoms)
4319 blocker_atoms = [myatom for myatom in atoms \
4321 blocker_atoms.sort()
4322 counter = long(pkg.metadata["COUNTER"])
4323 blocker_cache[cpv] = \
4324 blocker_cache.BlockerData(counter, blocker_atoms)
4327 for atom in blocker_atoms:
4328 blocker = Blocker(atom=atom,
4329 eapi=pkg.metadata["EAPI"],
4330 priority=self._priority(runtime=True),
4332 self._dynamic_config._blocker_parents.add(blocker, pkg)
4333 except portage.exception.InvalidAtom as e:
4334 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4335 show_invalid_depstring_notice(
4336 pkg, depstr, "Invalid Atom: %s" % (e,))
4338 for cpv in stale_cache:
4339 del blocker_cache[cpv]
4340 blocker_cache.flush()
4343 # Discard any "uninstall" tasks scheduled by previous calls
4344 # to this method, since those tasks may not make sense given
4345 # the current graph state.
4346 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
4347 if previous_uninstall_tasks:
4348 self._dynamic_config._blocker_uninstalls = digraph()
4349 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
4351 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
4352 self._spinner_update()
4353 root_config = self._frozen_config.roots[blocker.root]
4354 virtuals = root_config.settings.getvirtuals()
4355 myroot = blocker.root
4356 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
4357 final_db = self._dynamic_config.mydbapi[myroot]
4359 provider_virtual = False
4360 if blocker.cp in virtuals and \
4361 not self._have_new_virt(blocker.root, blocker.cp):
4362 provider_virtual = True
4364 # Use this to check PROVIDE for each matched package
4366 atom_set = InternalPackageSet(
4367 initial_atoms=[blocker.atom])
4369 if provider_virtual:
4371 for provider_entry in virtuals[blocker.cp]:
4372 atoms.append(Atom(blocker.atom.replace(
4373 blocker.cp, provider_entry.cp, 1)))
4375 atoms = [blocker.atom]
4377 blocked_initial = set()
4379 for pkg in initial_db.match_pkgs(atom):
4380 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4381 blocked_initial.add(pkg)
4383 blocked_final = set()
4385 for pkg in final_db.match_pkgs(atom):
4386 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4387 blocked_final.add(pkg)
4389 if not blocked_initial and not blocked_final:
4390 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
4391 self._dynamic_config._blocker_parents.remove(blocker)
4392 # Discard any parents that don't have any more blockers.
4393 for pkg in parent_pkgs:
4394 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
4395 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
4396 self._dynamic_config._blocker_parents.remove(pkg)
4398 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4399 unresolved_blocks = False
4400 depends_on_order = set()
4401 for pkg in blocked_initial:
4402 if pkg.slot_atom == parent.slot_atom and \
4403 not blocker.atom.blocker.overlap.forbid:
4404 # New !!atom blockers do not allow temporary
4405 # simultaneous installation, so unlike !atom
4406 # blockers, !!atom blockers aren't ignored
4407 # when they match other packages occupying
4410 if parent.installed:
4411 # Two currently installed packages conflict with
4412 # each other. Ignore this case since the damage
4413 # is already done and this would be likely to
4414 # confuse users if displayed like a normal blocker.
4417 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4419 if parent.operation == "merge":
4420 # Maybe the blocked package can be replaced or simply
4421 # unmerged to resolve this block.
4422 depends_on_order.add((pkg, parent))
4424 # None of the above blocker resolutions techniques apply,
4425 # so apparently this one is unresolvable.
4426 unresolved_blocks = True
4427 for pkg in blocked_final:
4428 if pkg.slot_atom == parent.slot_atom and \
4429 not blocker.atom.blocker.overlap.forbid:
4430 # New !!atom blockers do not allow temporary
4431 # simultaneous installation, so unlike !atom
4432 # blockers, !!atom blockers aren't ignored
4433 # when they match other packages occupying
4436 if parent.operation == "nomerge" and \
4437 pkg.operation == "nomerge":
4438 # This blocker will be handled the next time that a
4439 # merge of either package is triggered.
4442 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4444 # Maybe the blocking package can be
4445 # unmerged to resolve this block.
4446 if parent.operation == "merge" and pkg.installed:
4447 depends_on_order.add((pkg, parent))
4449 elif parent.operation == "nomerge":
4450 depends_on_order.add((parent, pkg))
4452 # None of the above blocker resolutions techniques apply,
4453 # so apparently this one is unresolvable.
4454 unresolved_blocks = True
4456 # Make sure we don't unmerge any package that has been pulled
4458 if not unresolved_blocks and depends_on_order:
4459 for inst_pkg, inst_task in depends_on_order:
4460 if self._dynamic_config.digraph.contains(inst_pkg) and \
4461 self._dynamic_config.digraph.parent_nodes(inst_pkg):
4462 unresolved_blocks = True
4465 if not unresolved_blocks and depends_on_order:
4466 for inst_pkg, inst_task in depends_on_order:
4467 uninst_task = Package(built=inst_pkg.built,
4468 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4469 metadata=inst_pkg.metadata,
4470 operation="uninstall",
4471 root_config=inst_pkg.root_config,
4472 type_name=inst_pkg.type_name)
4473 # Enforce correct merge order with a hard dep.
4474 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
4475 priority=BlockerDepPriority.instance)
4476 # Count references to this blocker so that it can be
4477 # invalidated after nodes referencing it have been
4479 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
4480 if not unresolved_blocks and not depends_on_order:
4481 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
4482 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
4483 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
4484 self._dynamic_config._blocker_parents.remove(blocker)
4485 if not self._dynamic_config._blocker_parents.child_nodes(parent):
4486 self._dynamic_config._blocker_parents.remove(parent)
4487 if unresolved_blocks:
4488 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
4492 def _accept_blocker_conflicts(self):
4494 for x in ("--buildpkgonly", "--fetchonly",
4495 "--fetch-all-uri", "--nodeps"):
4496 if x in self._frozen_config.myopts:
4501 def _merge_order_bias(self, mygraph):
4503 For optimal leaf node selection, promote deep system runtime deps and
4504 order nodes from highest to lowest overall reference count.
4508 for node in mygraph.order:
4509 node_info[node] = len(mygraph.parent_nodes(node))
4510 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
4512 def cmp_merge_preference(node1, node2):
4514 if node1.operation == 'uninstall':
4515 if node2.operation == 'uninstall':
4519 if node2.operation == 'uninstall':
4520 if node1.operation == 'uninstall':
4524 node1_sys = node1 in deep_system_deps
4525 node2_sys = node2 in deep_system_deps
4526 if node1_sys != node2_sys:
4531 return node_info[node2] - node_info[node1]
4533 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
4535 def altlist(self, reversed=False):
4537 while self._dynamic_config._serialized_tasks_cache is None:
4538 self._resolve_conflicts()
4540 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
4541 self._serialize_tasks()
4542 except self._serialize_tasks_retry:
4545 retlist = self._dynamic_config._serialized_tasks_cache[:]
4550 def _implicit_libc_deps(self, mergelist, graph):
4552 Create implicit dependencies on libc, in order to ensure that libc
4553 is installed as early as possible (see bug #303567).
4556 implicit_libc_roots = (self._frozen_config._running_root.root,)
4557 for root in implicit_libc_roots:
4558 graphdb = self._dynamic_config.mydbapi[root]
4559 vardb = self._frozen_config.trees[root]["vartree"].dbapi
4560 for atom in self._expand_virt_from_graph(root,
4561 portage.const.LIBC_PACKAGE_ATOM):
4564 match = graphdb.match_pkgs(atom)
4568 if pkg.operation == "merge" and \
4569 not vardb.cpv_exists(pkg.cpv):
4570 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
4575 earlier_libc_pkgs = set()
4577 for pkg in mergelist:
4578 if not isinstance(pkg, Package):
4579 # a satisfied blocker
4581 root_libc_pkgs = libc_pkgs.get(pkg.root)
4582 if root_libc_pkgs is not None and \
4583 pkg.operation == "merge":
4584 if pkg in root_libc_pkgs:
4585 earlier_libc_pkgs.add(pkg)
4587 for libc_pkg in root_libc_pkgs:
4588 if libc_pkg in earlier_libc_pkgs:
4589 graph.add(libc_pkg, pkg,
4590 priority=DepPriority(buildtime=True))
4592 def schedulerGraph(self):
4594 The scheduler graph is identical to the normal one except that
4595 uninstall edges are reversed in specific cases that require
4596 conflicting packages to be temporarily installed simultaneously.
4597 This is intended for use by the Scheduler in its parallelization
4598 logic. It ensures that temporary simultaneous installation of
4599 conflicting packages is avoided when appropriate (especially for
4600 !!atom blockers), but allowed in specific cases that require it.
4602 Note that this method calls break_refs() which alters the state of
4603 internal Package instances such that this depgraph instance should
4604 not be used to perform any more calculations.
4607 # NOTE: altlist initializes self._dynamic_config._scheduler_graph
4608 mergelist = self.altlist()
4609 self._implicit_libc_deps(mergelist,
4610 self._dynamic_config._scheduler_graph)
4612 # Break DepPriority.satisfied attributes which reference
4613 # installed Package instances.
4614 for parents, children, node in \
4615 self._dynamic_config._scheduler_graph.nodes.values():
4616 for priorities in chain(parents.values(), children.values()):
4617 for priority in priorities:
4618 if priority.satisfied:
4619 priority.satisfied = True
4621 pkg_cache = self._frozen_config._pkg_cache
4622 graph = self._dynamic_config._scheduler_graph
4623 trees = self._frozen_config.trees
4624 pruned_pkg_cache = {}
4625 for key, pkg in pkg_cache.items():
4626 if pkg in graph or \
4627 (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
4628 pruned_pkg_cache[key] = pkg
4631 trees[root]['vartree']._pkg_cache = pruned_pkg_cache
4635 _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
def break_refs(self):
    """
    Break any references in Package instances that lead back to the depgraph.
    This is useful if you want to hold references to packages without also
    holding the depgraph on the heap. It should only be called after the
    depgraph and _frozen_config will not be used for any more calculations.
    """
    for root_config in self._frozen_config.roots.values():
        # NOTE(review): update() presumably overwrites this RootConfig's
        # state with that of the original (non-FakeVartree) instance kept
        # in _trees_orig -- confirm RootConfig.update semantics.
        root_config.update(self._frozen_config._trees_orig[
            root_config.root]["root_config"])
        # Both instances are now identical, so discard the
        # original which should have no other references.
        self._frozen_config._trees_orig[
            root_config.root]["root_config"] = root_config
def _resolve_conflicts(self):
    """
    Complete the dependency graph, validate blockers, and then process
    any slot collisions that remain.

    @raises: self._unknown_internal_error if graph completion or blocker
        validation fails (internal inconsistency rather than user error).
    """
    # Pull deep dependencies of required sets into the graph so that
    # initially satisfied dependencies are not broken (see _complete_graph).
    if not self._complete_graph():
        raise self._unknown_internal_error()

    if not self._validate_blockers():
        # NOTE(review): presumably a blocker-validation failure cannot be
        # solved by a backtracking restart, hence the skip flag -- confirm.
        self._dynamic_config._skip_restart = True
        raise self._unknown_internal_error()

    if self._dynamic_config._slot_collision_info:
        self._process_slot_conflicts()
4665 def _serialize_tasks(self):
4667 if "--debug" in self._frozen_config.myopts:
4668 writemsg("\ndigraph:\n\n", noiselevel=-1)
4669 self._dynamic_config.digraph.debug_print()
4670 writemsg("\n", noiselevel=-1)
4672 scheduler_graph = self._dynamic_config.digraph.copy()
4674 if '--nodeps' in self._frozen_config.myopts:
4675 # Preserve the package order given on the command line.
4676 return ([node for node in scheduler_graph \
4677 if isinstance(node, Package) \
4678 and node.operation == 'merge'], scheduler_graph)
4680 mygraph=self._dynamic_config.digraph.copy()
4682 removed_nodes = set()
4684 # Prune off all DependencyArg instances since they aren't
4685 # needed, and because of nested sets this is faster than doing
4686 # it with multiple digraph.root_nodes() calls below. This also
4687 # takes care of nested sets that have circular references,
4688 # which wouldn't be matched by digraph.root_nodes().
4689 for node in mygraph:
4690 if isinstance(node, DependencyArg):
4691 removed_nodes.add(node)
4693 mygraph.difference_update(removed_nodes)
4694 removed_nodes.clear()
4696 # Prune "nomerge" root nodes if nothing depends on them, since
4697 # otherwise they slow down merge order calculation. Don't remove
4698 # non-root nodes since they help optimize merge order in some cases
4699 # such as revdep-rebuild.
4702 for node in mygraph.root_nodes():
4703 if not isinstance(node, Package) or \
4704 node.installed or node.onlydeps:
4705 removed_nodes.add(node)
4707 self._spinner_update()
4708 mygraph.difference_update(removed_nodes)
4709 if not removed_nodes:
4711 removed_nodes.clear()
4712 self._merge_order_bias(mygraph)
4713 def cmp_circular_bias(n1, n2):
4715 RDEPEND is stronger than PDEPEND and this function
4716 measures such a strength bias within a circular
4717 dependency relationship.
4719 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4720 ignore_priority=priority_range.ignore_medium_soft)
4721 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4722 ignore_priority=priority_range.ignore_medium_soft)
4723 if n1_n2_medium == n2_n1_medium:
4728 myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
4730 # Contains uninstall tasks that have been scheduled to
4731 # occur after overlapping blockers have been installed.
4732 scheduled_uninstalls = set()
4733 # Contains any Uninstall tasks that have been ignored
4734 # in order to avoid the circular deps code path. These
4735 # correspond to blocker conflicts that could not be
4737 ignored_uninstall_tasks = set()
4738 have_uninstall_task = False
4739 complete = "complete" in self._dynamic_config.myparams
4742 def get_nodes(**kwargs):
4744 Returns leaf nodes excluding Uninstall instances
4745 since those should be executed as late as possible.
4747 return [node for node in mygraph.leaf_nodes(**kwargs) \
4748 if isinstance(node, Package) and \
4749 (node.operation != "uninstall" or \
4750 node in scheduled_uninstalls)]
4752 # sys-apps/portage needs special treatment if ROOT="/"
4753 running_root = self._frozen_config._running_root.root
4754 runtime_deps = InternalPackageSet(
4755 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4756 running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
4757 PORTAGE_PACKAGE_ATOM)
4758 replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
4759 PORTAGE_PACKAGE_ATOM)
4762 running_portage = running_portage[0]
4764 running_portage = None
4766 if replacement_portage:
4767 replacement_portage = replacement_portage[0]
4769 replacement_portage = None
4771 if replacement_portage == running_portage:
4772 replacement_portage = None
4774 if replacement_portage is not None and \
4775 (running_portage is None or \
4776 running_portage.cpv != replacement_portage.cpv or \
4777 '9999' in replacement_portage.cpv or \
4778 'git' in replacement_portage.inherited or \
4779 'git-2' in replacement_portage.inherited):
4780 # update from running_portage to replacement_portage asap
4781 asap_nodes.append(replacement_portage)
4783 if running_portage is not None:
4785 portage_rdepend = self._select_atoms_highest_available(
4786 running_root, running_portage.metadata["RDEPEND"],
4787 myuse=self._pkg_use_enabled(running_portage),
4788 parent=running_portage, strict=False)
4789 except portage.exception.InvalidDependString as e:
4790 portage.writemsg("!!! Invalid RDEPEND in " + \
4791 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4792 (running_root, running_portage.cpv, e), noiselevel=-1)
4794 portage_rdepend = {running_portage : []}
4795 for atoms in portage_rdepend.values():
4796 runtime_deps.update(atom for atom in atoms \
4797 if not atom.blocker)
4799 # Merge libc asap, in order to account for implicit
4800 # dependencies. See bug #303567.
4801 implicit_libc_roots = (running_root,)
4802 for root in implicit_libc_roots:
4804 vardb = self._frozen_config.trees[root]["vartree"].dbapi
4805 graphdb = self._dynamic_config.mydbapi[root]
4806 for atom in self._expand_virt_from_graph(root,
4807 portage.const.LIBC_PACKAGE_ATOM):
4810 match = graphdb.match_pkgs(atom)
4814 if pkg.operation == "merge" and \
4815 not vardb.cpv_exists(pkg.cpv):
4819 # If there's also an os-headers upgrade, we need to
4820 # pull that in first. See bug #328317.
4821 for atom in self._expand_virt_from_graph(root,
4822 portage.const.OS_HEADERS_PACKAGE_ATOM):
4825 match = graphdb.match_pkgs(atom)
4829 if pkg.operation == "merge" and \
4830 not vardb.cpv_exists(pkg.cpv):
4831 asap_nodes.append(pkg)
4833 asap_nodes.extend(libc_pkgs)
4835 def gather_deps(ignore_priority, mergeable_nodes,
4836 selected_nodes, node):
4838 Recursively gather a group of nodes that RDEPEND on
4839 eachother. This ensures that they are merged as a group
4840 and get their RDEPENDs satisfied as soon as possible.
4842 if node in selected_nodes:
4844 if node not in mergeable_nodes:
4846 if node == replacement_portage and \
4847 mygraph.child_nodes(node,
4848 ignore_priority=priority_range.ignore_medium_soft):
4849 # Make sure that portage always has all of it's
4850 # RDEPENDs installed first.
4852 selected_nodes.add(node)
4853 for child in mygraph.child_nodes(node,
4854 ignore_priority=ignore_priority):
4855 if not gather_deps(ignore_priority,
4856 mergeable_nodes, selected_nodes, child):
4860 def ignore_uninst_or_med(priority):
4861 if priority is BlockerDepPriority.instance:
4863 return priority_range.ignore_medium(priority)
4865 def ignore_uninst_or_med_soft(priority):
4866 if priority is BlockerDepPriority.instance:
4868 return priority_range.ignore_medium_soft(priority)
4870 tree_mode = "--tree" in self._frozen_config.myopts
4871 # Tracks whether or not the current iteration should prefer asap_nodes
4872 # if available. This is set to False when the previous iteration
4873 # failed to select any nodes. It is reset whenever nodes are
4874 # successfully selected.
4877 # Controls whether or not the current iteration should drop edges that
4878 # are "satisfied" by installed packages, in order to solve circular
4879 # dependencies. The deep runtime dependencies of installed packages are
4880 # not checked in this case (bug #199856), so it must be avoided
4881 # whenever possible.
4882 drop_satisfied = False
4884 # State of variables for successive iterations that loosen the
4885 # criteria for node selection.
4887 # iteration prefer_asap drop_satisfied
4892 # If no nodes are selected on the last iteration, it is due to
4893 # unresolved blockers or circular dependencies.
4895 while not mygraph.empty():
4896 self._spinner_update()
4897 selected_nodes = None
4898 ignore_priority = None
4899 if drop_satisfied or (prefer_asap and asap_nodes):
4900 priority_range = DepPrioritySatisfiedRange
4902 priority_range = DepPriorityNormalRange
4903 if prefer_asap and asap_nodes:
4904 # ASAP nodes are merged before their soft deps. Go ahead and
4905 # select root nodes here if necessary, since it's typical for
4906 # the parent to have been removed from the graph already.
4907 asap_nodes = [node for node in asap_nodes \
4908 if mygraph.contains(node)]
4909 for node in asap_nodes:
4910 if not mygraph.child_nodes(node,
4911 ignore_priority=priority_range.ignore_soft):
4912 selected_nodes = [node]
4913 asap_nodes.remove(node)
4915 if not selected_nodes and \
4916 not (prefer_asap and asap_nodes):
4917 for i in range(priority_range.NONE,
4918 priority_range.MEDIUM_SOFT + 1):
4919 ignore_priority = priority_range.ignore_priority[i]
4920 nodes = get_nodes(ignore_priority=ignore_priority)
4922 # If there is a mixture of merges and uninstalls,
4923 # do the uninstalls first.
4925 good_uninstalls = []
4927 if node.operation == "uninstall":
4928 good_uninstalls.append(node)
4931 nodes = good_uninstalls
4935 if ignore_priority is None and not tree_mode:
4936 # Greedily pop all of these nodes since no
4937 # relationship has been ignored. This optimization
4938 # destroys --tree output, so it's disabled in tree
4940 selected_nodes = nodes
4942 # For optimal merge order:
4943 # * Only pop one node.
4944 # * Removing a root node (node without a parent)
4945 # will not produce a leaf node, so avoid it.
4946 # * It's normal for a selected uninstall to be a
4947 # root node, so don't check them for parents.
4949 if node.operation == "uninstall" or \
4950 mygraph.parent_nodes(node):
4951 selected_nodes = [node]
4957 if not selected_nodes:
4958 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4960 mergeable_nodes = set(nodes)
4961 if prefer_asap and asap_nodes:
4963 for i in range(priority_range.SOFT,
4964 priority_range.MEDIUM_SOFT + 1):
4965 ignore_priority = priority_range.ignore_priority[i]
4967 if not mygraph.parent_nodes(node):
4969 selected_nodes = set()
4970 if gather_deps(ignore_priority,
4971 mergeable_nodes, selected_nodes, node):
4974 selected_nodes = None
4978 if prefer_asap and asap_nodes and not selected_nodes:
4979 # We failed to find any asap nodes to merge, so ignore
4980 # them for the next iteration.
4984 if selected_nodes and ignore_priority is not None:
4985 # Try to merge ignored medium_soft deps as soon as possible
4986 # if they're not satisfied by installed packages.
4987 for node in selected_nodes:
4988 children = set(mygraph.child_nodes(node))
4989 soft = children.difference(
4990 mygraph.child_nodes(node,
4991 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4992 medium_soft = children.difference(
4993 mygraph.child_nodes(node,
4995 DepPrioritySatisfiedRange.ignore_medium_soft))
4996 medium_soft.difference_update(soft)
4997 for child in medium_soft:
4998 if child in selected_nodes:
5000 if child in asap_nodes:
5002 asap_nodes.append(child)
5004 if selected_nodes and len(selected_nodes) > 1:
5005 if not isinstance(selected_nodes, list):
5006 selected_nodes = list(selected_nodes)
5007 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
5009 if not selected_nodes and not myblocker_uninstalls.is_empty():
5010 # An Uninstall task needs to be executed in order to
5011 # avoid conflict if possible.
5014 priority_range = DepPrioritySatisfiedRange
5016 priority_range = DepPriorityNormalRange
5018 mergeable_nodes = get_nodes(
5019 ignore_priority=ignore_uninst_or_med)
5021 min_parent_deps = None
5024 for task in myblocker_uninstalls.leaf_nodes():
5025 # Do some sanity checks so that system or world packages
5026 # don't get uninstalled inappropriately here (only really
5027 # necessary when --complete-graph has not been enabled).
5029 if task in ignored_uninstall_tasks:
5032 if task in scheduled_uninstalls:
5033 # It's been scheduled but it hasn't
5034 # been executed yet due to dependence
5035 # on installation of blocking packages.
5038 root_config = self._frozen_config.roots[task.root]
5039 inst_pkg = self._pkg(task.cpv, "installed", root_config,
5042 if self._dynamic_config.digraph.contains(inst_pkg):
5045 forbid_overlap = False
5046 heuristic_overlap = False
5047 for blocker in myblocker_uninstalls.parent_nodes(task):
5048 if not eapi_has_strong_blocks(blocker.eapi):
5049 heuristic_overlap = True
5050 elif blocker.atom.blocker.overlap.forbid:
5051 forbid_overlap = True
5053 if forbid_overlap and running_root == task.root:
5056 if heuristic_overlap and running_root == task.root:
5057 # Never uninstall sys-apps/portage or it's essential
5058 # dependencies, except through replacement.
5060 runtime_dep_atoms = \
5061 list(runtime_deps.iterAtomsForPackage(task))
5062 except portage.exception.InvalidDependString as e:
5063 portage.writemsg("!!! Invalid PROVIDE in " + \
5064 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5065 (task.root, task.cpv, e), noiselevel=-1)
5069 # Don't uninstall a runtime dep if it appears
5070 # to be the only suitable one installed.
5072 vardb = root_config.trees["vartree"].dbapi
5073 for atom in runtime_dep_atoms:
5074 other_version = None
5075 for pkg in vardb.match_pkgs(atom):
5076 if pkg.cpv == task.cpv and \
5077 pkg.metadata["COUNTER"] == \
5078 task.metadata["COUNTER"]:
5082 if other_version is None:
5088 # For packages in the system set, don't take
5089 # any chances. If the conflict can't be resolved
5090 # by a normal replacement operation then abort.
5093 for atom in root_config.sets[
5094 "system"].iterAtomsForPackage(task):
5097 except portage.exception.InvalidDependString as e:
5098 portage.writemsg("!!! Invalid PROVIDE in " + \
5099 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5100 (task.root, task.cpv, e), noiselevel=-1)
5106 # Note that the world check isn't always
5107 # necessary since self._complete_graph() will
5108 # add all packages from the system and world sets to the
5109 # graph. This just allows unresolved conflicts to be
5110 # detected as early as possible, which makes it possible
5111 # to avoid calling self._complete_graph() when it is
5112 # unnecessary due to blockers triggering an abortion.
5114 # For packages in the world set, go ahead an uninstall
5115 # when necessary, as long as the atom will be satisfied
5116 # in the final state.
5117 graph_db = self._dynamic_config.mydbapi[task.root]
5120 for atom in root_config.sets[
5121 "selected"].iterAtomsForPackage(task):
5123 for pkg in graph_db.match_pkgs(atom):
5130 self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
5132 except portage.exception.InvalidDependString as e:
5133 portage.writemsg("!!! Invalid PROVIDE in " + \
5134 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5135 (task.root, task.cpv, e), noiselevel=-1)
5141 # Check the deps of parent nodes to ensure that
5142 # the chosen task produces a leaf node. Maybe
5143 # this can be optimized some more to make the
5144 # best possible choice, but the current algorithm
5145 # is simple and should be near optimal for most
5147 self._spinner_update()
5148 mergeable_parent = False
5150 parent_deps.add(task)
5151 for parent in mygraph.parent_nodes(task):
5152 parent_deps.update(mygraph.child_nodes(parent,
5153 ignore_priority=priority_range.ignore_medium_soft))
5154 if min_parent_deps is not None and \
5155 len(parent_deps) >= min_parent_deps:
5156 # This task is no better than a previously selected
5157 # task, so abort search now in order to avoid wasting
5158 # any more cpu time on this task. This increases
5159 # performance dramatically in cases when there are
5160 # hundreds of blockers to solve, like when
5161 # upgrading to a new slot of kde-meta.
5162 mergeable_parent = None
5164 if parent in mergeable_nodes and \
5165 gather_deps(ignore_uninst_or_med_soft,
5166 mergeable_nodes, set(), parent):
5167 mergeable_parent = True
5169 if not mergeable_parent:
5172 if min_parent_deps is None or \
5173 len(parent_deps) < min_parent_deps:
5174 min_parent_deps = len(parent_deps)
5177 if uninst_task is not None and min_parent_deps == 1:
5178 # This is the best possible result, so so abort search
5179 # now in order to avoid wasting any more cpu time.
5182 if uninst_task is not None:
5183 # The uninstall is performed only after blocking
5184 # packages have been merged on top of it. File
5185 # collisions between blocking packages are detected
5186 # and removed from the list of files to be uninstalled.
5187 scheduled_uninstalls.add(uninst_task)
5188 parent_nodes = mygraph.parent_nodes(uninst_task)
5190 # Reverse the parent -> uninstall edges since we want
5191 # to do the uninstall after blocking packages have
5192 # been merged on top of it.
5193 mygraph.remove(uninst_task)
5194 for blocked_pkg in parent_nodes:
5195 mygraph.add(blocked_pkg, uninst_task,
5196 priority=BlockerDepPriority.instance)
5197 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5198 scheduler_graph.add(blocked_pkg, uninst_task,
5199 priority=BlockerDepPriority.instance)
5201 # Sometimes a merge node will render an uninstall
5202 # node unnecessary (due to occupying the same SLOT),
5203 # and we want to avoid executing a separate uninstall
5204 # task in that case.
5205 slot_node = self._dynamic_config.mydbapi[uninst_task.root
5206 ].match_pkgs(uninst_task.slot_atom)
5208 slot_node[0].operation == "merge":
5209 mygraph.add(slot_node[0], uninst_task,
5210 priority=BlockerDepPriority.instance)
5212 # Reset the state variables for leaf node selection and
5213 # continue trying to select leaf nodes.
5215 drop_satisfied = False
5218 if not selected_nodes:
5219 # Only select root nodes as a last resort. This case should
5220 # only trigger when the graph is nearly empty and the only
5221 # remaining nodes are isolated (no parents or children). Since
5222 # the nodes must be isolated, ignore_priority is not needed.
5223 selected_nodes = get_nodes()
5225 if not selected_nodes and not drop_satisfied:
5226 drop_satisfied = True
5229 if not selected_nodes and not myblocker_uninstalls.is_empty():
5230 # If possible, drop an uninstall task here in order to avoid
5231 # the circular deps code path. The corresponding blocker will
5232 # still be counted as an unresolved conflict.
5234 for node in myblocker_uninstalls.leaf_nodes():
5236 mygraph.remove(node)
5241 ignored_uninstall_tasks.add(node)
5244 if uninst_task is not None:
5245 # Reset the state variables for leaf node selection and
5246 # continue trying to select leaf nodes.
5248 drop_satisfied = False
5251 if not selected_nodes:
5252 self._dynamic_config._circular_deps_for_display = mygraph
5253 self._dynamic_config._skip_restart = True
5254 raise self._unknown_internal_error()
5256 # At this point, we've succeeded in selecting one or more nodes, so
5257 # reset state variables for leaf node selection.
5259 drop_satisfied = False
5261 mygraph.difference_update(selected_nodes)
5263 for node in selected_nodes:
5264 if isinstance(node, Package) and \
5265 node.operation == "nomerge":
5268 # Handle interactions between blockers
5269 # and uninstallation tasks.
5270 solved_blockers = set()
5272 if isinstance(node, Package) and \
5273 "uninstall" == node.operation:
5274 have_uninstall_task = True
5277 vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
5278 inst_pkg = vardb.match_pkgs(node.slot_atom)
5280 # The package will be replaced by this one, so remove
5281 # the corresponding Uninstall task if necessary.
5282 inst_pkg = inst_pkg[0]
5283 uninst_task = Package(built=inst_pkg.built,
5284 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
5285 metadata=inst_pkg.metadata,
5286 operation="uninstall",
5287 root_config=inst_pkg.root_config,
5288 type_name=inst_pkg.type_name)
5290 mygraph.remove(uninst_task)
5294 if uninst_task is not None and \
5295 uninst_task not in ignored_uninstall_tasks and \
5296 myblocker_uninstalls.contains(uninst_task):
5297 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
5298 myblocker_uninstalls.remove(uninst_task)
5299 # Discard any blockers that this Uninstall solves.
5300 for blocker in blocker_nodes:
5301 if not myblocker_uninstalls.child_nodes(blocker):
5302 myblocker_uninstalls.remove(blocker)
5304 self._dynamic_config._unsolvable_blockers:
5305 solved_blockers.add(blocker)
5307 retlist.append(node)
5309 if (isinstance(node, Package) and \
5310 "uninstall" == node.operation) or \
5311 (uninst_task is not None and \
5312 uninst_task in scheduled_uninstalls):
5313 # Include satisfied blockers in the merge list
5314 # since the user might be interested and also
5315 # it serves as an indicator that blocking packages
5316 # will be temporarily installed simultaneously.
5317 for blocker in solved_blockers:
5318 retlist.append(blocker)
5320 unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
5321 for node in myblocker_uninstalls.root_nodes():
5322 unsolvable_blockers.add(node)
5324 # If any Uninstall tasks need to be executed in order
5325 # to avoid a conflict, complete the graph with any
5326 # dependencies that may have been initially
5327 # neglected (to ensure that unsafe Uninstall tasks
5328 # are properly identified and blocked from execution).
5329 if have_uninstall_task and \
5331 not unsolvable_blockers:
5332 self._dynamic_config.myparams["complete"] = True
5333 if '--debug' in self._frozen_config.myopts:
5335 msg.append("enabling 'complete' depgraph mode " + \
5336 "due to uninstall task(s):")
5338 for node in retlist:
5339 if isinstance(node, Package) and \
5340 node.operation == 'uninstall':
5341 msg.append("\t%s" % (node,))
5342 writemsg_level("\n%s\n" % \
5343 "".join("%s\n" % line for line in msg),
5344 level=logging.DEBUG, noiselevel=-1)
5345 raise self._serialize_tasks_retry("")
5347 # Set satisfied state on blockers, but not before the
5348 # above retry path, since we don't want to modify the
5349 # state in that case.
5350 for node in retlist:
5351 if isinstance(node, Blocker):
5352 node.satisfied = True
5354 for blocker in unsolvable_blockers:
5355 retlist.append(blocker)
5357 if unsolvable_blockers and \
5358 not self._accept_blocker_conflicts():
5359 self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
5360 self._dynamic_config._serialized_tasks_cache = retlist[:]
5361 self._dynamic_config._scheduler_graph = scheduler_graph
5362 self._dynamic_config._skip_restart = True
5363 raise self._unknown_internal_error()
5365 if self._dynamic_config._slot_collision_info and \
5366 not self._accept_blocker_conflicts():
5367 self._dynamic_config._serialized_tasks_cache = retlist[:]
5368 self._dynamic_config._scheduler_graph = scheduler_graph
5369 raise self._unknown_internal_error()
5371 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Display the circular dependency cycle(s) contained in mygraph,
    together with any USE-flag changes the circular dependency
    handler suggests for breaking the cycle.

    NOTE(review): reconstructed from a garbled numbered listing
    (fused line numbers, stripped indentation, dropped
    continuation/else lines) — verify against upstream portage
    before relying on exact output.
    """
    self._dynamic_config._circular_dependency_handler = \
        circular_dependency_handler(self, mygraph)
    handler = self._dynamic_config._circular_dependency_handler

    # Force verbose tree output so the cycle is visible in the
    # displayed merge list.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts["--verbose"] = True
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(handler.merge_list)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
        noiselevel=-1)
    portage.writemsg("\n", noiselevel=-1)

    # Fall back to the raw graph dump when no human-readable cycle
    # message is available, or when --debug was requested.
    if handler.circular_dep_message is None or \
        "--debug" in self._frozen_config.myopts:
        handler.debug_print()
        portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is not None:
        portage.writemsg(handler.circular_dep_message, noiselevel=-1)

    suggestions = handler.suggestions
    if suggestions:
        writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
        if len(suggestions) == 1:
            writemsg("by applying the following change:\n", noiselevel=-1)
        else:
            writemsg("by applying " + colorize("bold", "any of") + \
                " the following changes:\n", noiselevel=-1)
        writemsg("".join(suggestions), noiselevel=-1)
        writemsg("\nNote that this change can be reverted, once the package has" + \
            " been installed.\n", noiselevel=-1)
        if handler.large_cycle_count:
            writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
                "Several changes might be required to resolve all cycles.\n" + \
                "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
    else:
        # No automatic suggestion available; give the generic hint.
        writemsg("\n\n", noiselevel=-1)
        writemsg(prefix + "Note that circular dependencies " + \
            "can often be avoided by temporarily\n", noiselevel=-1)
        writemsg(prefix + "disabling USE flags that trigger " + \
            "optional dependencies.\n", noiselevel=-1)
5419 def _show_merge_list(self):
5420 if self._dynamic_config._serialized_tasks_cache is not None and \
5421 not (self._dynamic_config._displayed_list and \
5422 (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
5423 self._dynamic_config._displayed_list == \
5424 list(reversed(self._dynamic_config._serialized_tasks_cache)))):
5425 display_list = self._dynamic_config._serialized_tasks_cache[:]
5426 if "--tree" in self._frozen_config.myopts:
5427 display_list.reverse()
5428 self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Report blocker conflicts that could not be solved automatically,
    listing each conflicting package along with (up to 3 of) the
    parents that pulled it in.

    NOTE(review): reconstructed from a garbled numbered listing with
    dropped lines; verify against upstream portage.
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in textwrap.wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    # display.
    conflict_pkgs = {}
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                # Packages blocked via the world/selected set have no
                # recorded parent atoms; synthesize an "@selected" parent.
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@selected", atom)])
            if parent_atoms:
                conflict_pkgs[pkg] = parent_atoms

    if conflict_pkgs:
        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        pruned_pkgs = set()
        for pkg, parent_atoms in conflict_pkgs.items():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
                    break
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

    if conflict_pkgs:
        msg = []
        indent = "  "
        # Max number of parents shown, to avoid flooding the display.
        max_parents = 3
        for pkg, parent_atoms in conflict_pkgs.items():

            pruned_list = set()

            # Prefer packages that are not directly involved in a conflict.
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                    break
                parent, atom = parent_atom
                if parent not in conflict_pkgs:
                    pruned_list.add(parent_atom)

            # Fill remaining slots with any parent, up to the cap.
            for parent_atom in parent_atoms:
                if len(pruned_list) >= max_parents:
                    break
                pruned_list.add(parent_atom)

            omitted_parents = len(parent_atoms) - len(pruned_list)
            msg.append(indent + "%s pulled in by\n" % pkg)

            for parent_atom in pruned_list:
                parent, atom = parent_atom
                msg.append(2*indent)
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                else:
                    # Display the specific atom from SetArg or
                    # Package types.
                    msg.append("%s required by %s" % (atom, parent))
                msg.append("\n")

            if omitted_parents:
                msg.append(2*indent)
                msg.append("(and %d more)\n" % omitted_parents)

            msg.append("\n")

        sys.stderr.write("".join(msg))
        sys.stderr.flush()

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=[], verbosity=None):
    """
    Display a merge list by delegating to the resolver output
    formatter, remembering the list so display_problems() does not
    redundantly show it again.

    Note: the mutable default for ``favorites`` is kept for
    interface compatibility; it is only passed through, never
    mutated here.
    """
    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist

    # NOTE(review): the dropped lines in the garbled listing almost
    # certainly instantiated the output formatter here — confirm
    # against upstream (_emerge.resolver.output.Display).
    display = Display()

    return display(self, mylist, favorites, verbosity)
# NOTE(review): This entire method is a garbled numbered listing — the
# original file line numbers ("5535" etc.) are fused into every line,
# indentation is stripped, and the gaps in the fused numbering show that
# many lines were dropped (blank lines, "else:"/"try:" headers, loop
# headers such as "for root in roots:", initializers such as
# "roots = set()", "all_cpv = []", "license_msg = {}", "problems = []").
# The code below is left byte-identical; it must be re-synchronized with
# the upstream portage source before it can run. Comments added describe
# only what the visible lines establish.
#
# Purpose (from the visible docstring fragment): display the
# --autounmask message and optionally write the changes to the user's
# config files under /etc/portage, honoring CONFIG_PROTECT.
5535 def _display_autounmask(self):
5537 Display --autounmask message and optionally write it to config files
5538 (using CONFIG_PROTECT). The message includes the comments and the changes.
# Mode flags read from the frozen emerge options.
5541 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
5542 pretend = "--pretend" in self._frozen_config.myopts
5543 ask = "--ask" in self._frozen_config.myopts
5544 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
# Helper: determine whether pkg is the highest version overall and/or
# within its SLOT, across all configured package databases. Used below
# to decide between ">=cat/pkg-ver", ">=cat/pkg-ver:SLOT" and
# "=cat/pkg-ver" atoms in the generated config entries.
5546 def check_if_latest(pkg):
5548 is_latest_in_slot = True
5549 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
5550 root_config = self._frozen_config.roots[pkg.root]
5552 all_cpv_by_slot = {}
5553 for db, pkg_type, built, installed, db_keys in dbs:
5554 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
5555 slot = other_pkg.metadata["SLOT"]
5556 all_cpv_by_slot.setdefault(slot, set())
5557 all_cpv_by_slot[slot].add(other_pkg.cpv)
# NOTE(review): "all_cpv = []" initializer dropped by the garbling.
5560 for cpvs in all_cpv_by_slot.values():
5561 all_cpv.extend(cpvs)
5562 all_cpv.sort(key=portage.versions.cpv_sort_key())
5564 if all_cpv[-1] != pkg.cpv:
5566 slot_cpvs = sorted(all_cpv_by_slot[pkg.metadata["SLOT"]], key=portage.versions.cpv_sort_key())
5567 if slot_cpvs[-1] != pkg.cpv:
5568 is_latest_in_slot = False
5570 return is_latest, is_latest_in_slot
5572 #Set of roots we have autounmask changes for.
# Collect per-root package.keywords entries for packages that need an
# unstable keyword to be accepted.
5575 unstable_keyword_msg = {}
5576 for pkg in self._dynamic_config._needed_unstable_keywords:
5577 self._show_merge_list()
5578 if pkg in self._dynamic_config.digraph:
5581 unstable_keyword_msg.setdefault(root, [])
5582 is_latest, is_latest_in_slot = check_if_latest(pkg)
5583 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5584 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5585 use=self._pkg_use_enabled(pkg))
5586 for reason in mreasons:
5587 if reason.unmask_hint and \
5588 reason.unmask_hint.key == 'unstable keyword':
5589 keyword = reason.unmask_hint.value
5591 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
5593 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
5594 elif is_latest_in_slot:
5595 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
5597 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
# Collect per-root package.unmask entries for hard-masked packages.
5599 p_mask_change_msg = {}
5600 for pkg in self._dynamic_config._needed_p_mask_changes:
5601 self._show_merge_list()
5602 if pkg in self._dynamic_config.digraph:
5605 p_mask_change_msg.setdefault(root, [])
5606 is_latest, is_latest_in_slot = check_if_latest(pkg)
5607 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5608 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5609 use=self._pkg_use_enabled(pkg))
5610 for reason in mreasons:
5611 if reason.unmask_hint and \
5612 reason.unmask_hint.key == 'p_mask':
5613 keyword = reason.unmask_hint.value
5615 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
5617 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
5618 elif is_latest_in_slot:
5619 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
5621 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
# Collect per-root package.use entries ("flag" / "-flag" adjustments).
5623 use_changes_msg = {}
5624 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
5625 self._show_merge_list()
5626 if pkg in self._dynamic_config.digraph:
5629 use_changes_msg.setdefault(root, [])
5630 is_latest, is_latest_in_slot = check_if_latest(pkg)
5631 changes = needed_use_config_change[1]
# NOTE(review): "adjustments = []" initializer dropped by the garbling.
5633 for flag, state in changes.items():
5635 adjustments.append(flag)
5637 adjustments.append("-" + flag)
5638 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
5640 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5641 elif is_latest_in_slot:
5642 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
5644 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
# Collect per-root package.license entries for missing licenses.
# NOTE(review): "license_msg = {}" initializer dropped by the garbling.
5647 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
5648 self._show_merge_list()
5649 if pkg in self._dynamic_config.digraph:
5652 license_msg.setdefault(root, [])
5653 is_latest, is_latest_in_slot = check_if_latest(pkg)
5655 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
5657 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
5658 elif is_latest_in_slot:
5659 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
5661 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
# Helper: locate the config file (or last file within a config
# directory) that new entries should be appended to.
5663 def find_config_file(abs_user_config, file_name):
5665 Searches /etc/portage for an appropriate file to append changes to.
5666 If the file_name is a file it is returned, if it is a directory, the
5667 last file in it is returned. Order of traversal is the identical to
5668 portage.util.grablines(recursive=True).
5670 file_name - String containing a file name like "package.use"
5671 return value - String. Absolute path of file to write to. None if
5672 no suitable file exists.
5674 file_path = os.path.join(abs_user_config, file_name)
5675 last_file_path = None
# NOTE(review): the stat loop header (stack-based directory walk with
# os.stat in a try block) was dropped by the garbling — lines 5676-5683.
5684 if stat.S_ISREG(st.st_mode):
5686 elif stat.S_ISDIR(st.st_mode):
5688 contents = os.listdir(p)
5692 contents.sort(reverse=True)
5693 for child in contents:
5694 stack.append(os.path.join(p, child))
5696 return last_file_path
# Resolve target config files up front, so nothing is written unless a
# destination exists for every change set.
5698 write_to_file = autounmask_write and not pretend
5699 #Make sure we have a file to write to before doing any write.
5700 file_to_write_to = {}
5704 abs_user_config = os.path.join(root, USER_CONFIG_PATH)
5706 if root in unstable_keyword_msg:
5707 file_to_write_to[(abs_user_config, "package.keywords")] = \
5708 find_config_file(abs_user_config, "package.keywords")
5710 if root in p_mask_change_msg:
5711 file_to_write_to[(abs_user_config, "package.unmask")] = \
5712 find_config_file(abs_user_config, "package.unmask")
5714 if root in use_changes_msg:
5715 file_to_write_to[(abs_user_config, "package.use")] = \
5716 find_config_file(abs_user_config, "package.use")
5718 if root in license_msg:
5719 file_to_write_to[(abs_user_config, "package.license")] = \
5720 find_config_file(abs_user_config, "package.license")
5722 for (abs_user_config, f), path in file_to_write_to.items():
5724 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
5726 write_to_file = not problems
# Print the collected change sets for each affected root.
5729 abs_user_config = os.path.join(root, USER_CONFIG_PATH)
5731 writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
5733 if root in unstable_keyword_msg:
5734 writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
5735 " are necessary to proceed:\n", noiselevel=-1)
5736 writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
5738 if root in p_mask_change_msg:
5739 writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
5740 " are necessary to proceed:\n", noiselevel=-1)
5741 writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
5743 if root in use_changes_msg:
5744 writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
5745 " are necessary to proceed:\n", noiselevel=-1)
5746 writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
5748 if root in license_msg:
5749 writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
5750 " are necessary to proceed:\n", noiselevel=-1)
5751 writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
# Build a ConfigProtect instance per root so protected files are not
# overwritten in place (CONFIG_PROTECT / CONFIG_PROTECT_MASK).
5756 settings = self._frozen_config.pkgsettings[root]
5757 protect_obj[root] = ConfigProtect(root, \
5758 shlex_split(settings.get("CONFIG_PROTECT", "")),
5759 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
# Helper: append the generated entries to the target file, routing
# through new_protect_filename() when the file is CONFIG_PROTECTed and
# writing atomically. I/O failures are recorded in `problems` rather
# than raised.
5761 def write_changes(root, changes, file_to_write_to):
5763 file_contents = codecs.open(
5764 _unicode_encode(file_to_write_to,
5765 encoding=_encodings['fs'], errors='strict'),
5766 mode='r', encoding=_encodings['content'],
5767 errors='replace').readlines()
5768 except IOError as e:
5769 problems.append("!!! Failed to read '%s': %s\n" % (file_to_write_to, e))
5771 file_contents.extend(changes)
5772 if protect_obj[root].isprotected(file_to_write_to):
5773 file_to_write_to = new_protect_filename(file_to_write_to)
5775 write_atomic(file_to_write_to, "".join(file_contents))
5776 except PortageException:
5777 problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
# Interactive confirmation before any files are touched.
5779 if ask and write_to_file and file_to_write_to:
5780 prompt = "\nWould you like to add these " + \
5781 "changes to your config files?"
5782 if userquery(prompt, enter_invalid) == 'No':
5783 write_to_file = False
# Perform the actual writes for each root and change category.
5787 abs_user_config = os.path.join(root, USER_CONFIG_PATH)
5789 if root in unstable_keyword_msg:
5790 write_changes(root, unstable_keyword_msg[root],
5791 file_to_write_to.get((abs_user_config, "package.keywords")))
5793 if root in p_mask_change_msg:
5794 write_changes(root, p_mask_change_msg[root],
5795 file_to_write_to.get((abs_user_config, "package.unmask")))
5797 if root in use_changes_msg:
5798 write_changes(root, use_changes_msg[root],
5799 file_to_write_to.get((abs_user_config, "package.use")))
5801 if root in license_msg:
5802 write_changes(root, license_msg[root],
5803 file_to_write_to.get((abs_user_config, "package.license")))
# Final status messages: write problems, success reminder to run
# etc-update, or the hint to use --autounmask-write.
5806 writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
5808 writemsg_stdout("".join(problems), noiselevel=-1)
5809 elif write_to_file and roots:
5810 writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
5812 elif not pretend and not autounmask_write and roots:
5813 writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """
    # NOTE(review): some original lines (the save/restore of sys.stdout in a
    # try/finally) are elided in this excerpt; original line numbering jumps.

    # Note that show_masked_packages() sends its output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    sys.stdout = sys.stderr
    self._display_problems()

    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
    """
    Write accumulated graph problems to stderr: circular dependencies,
    unsatisfied blockers or slot collisions, missed updates, autounmask
    changes, world-file inconsistencies, and masked packages.
    """
    # NOTE(review): several original lines (loop-control statements, some
    # assignments and branch headers) are elided in this excerpt; original
    # line numbering jumps accordingly.
    if self._dynamic_config._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._dynamic_config._circular_deps_for_display)

    # The user is only notified of a slot conflict if
    # there are no unresolvable blocker conflicts.
    if self._dynamic_config._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._dynamic_config._unsatisfied_blockers_for_display)
    elif self._dynamic_config._slot_collision_info:
        self._show_slot_collision_notice()

    self._show_missed_update()

    self._display_autounmask()

    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.

    if self._dynamic_config._missing_args:
        world_problems = False
        if "world" in self._dynamic_config.sets[
            self._frozen_config.target_root].sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
            for arg, atom in self._dynamic_config._missing_args:
                if arg.name in ("selected", "world") and atom in world_set:
                    world_problems = True

        sys.stderr.write("\n!!! Problems have been " + \
            "detected with your world file\n")
        sys.stderr.write("!!! Please run " + \
            green("emaint --check world")+"\n\n")

    if self._dynamic_config._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._dynamic_config._missing_args) + "\n")

    if self._dynamic_config._pprovided_args:
        for arg, atom in self._dynamic_config._pprovided_args:
            if isinstance(arg, SetArg):
                # For set arguments the atom itself is the display key.
                arg_atom = (atom, atom)
                arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:
        msg.append(bad("\nWARNING: "))
        if len(self._dynamic_config._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
            msg.append("A requested package will not be " + \
                "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.items():
                problems_sets.update(refs)
            ref_string = ", ".join(["'%s'" % name for name in refs])
            ref_string = " pulled in by " + ref_string
            msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
        if "selected" in problems_sets or "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append(" B) Uninstall offending packages (cleans them from world).\n")
            msg.append(" C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))

    masked_packages = []
    for pkg in self._dynamic_config._masked_license_updates:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.repo, pkg.metadata, mreasons))
    writemsg("\n" + colorize("BAD", "!!!") + \
        " The following updates are masked by LICENSE changes:\n",
    show_masked_packages(masked_packages)
    writemsg("\n", noiselevel=-1)

    masked_packages = []
    for pkg in self._dynamic_config._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self._frozen_config.pkgsettings[pkg.root]
        # NOTE(review): here _pkg_use_enabled is passed uncalled (compare the
        # LICENSE-updates loop above which calls it) — confirm intended.
        mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.repo, pkg.metadata, mreasons))
    writemsg("\n" + colorize("BAD", "!!!") + \
        " The following installed packages are masked:\n",
    show_masked_packages(masked_packages)
    writemsg("\n", noiselevel=-1)
def saveNomergeFavorites(self):
    """Find atoms in favorites that are not in the mergelist and add them
    to the world file if necessary."""
    # NOTE(review): several original lines (early returns, lock/unlock
    # handling, loop-control statements and accumulator initialization) are
    # elided in this excerpt; original line numbering jumps accordingly.
    for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
        "--oneshot", "--onlydeps", "--pretend"):
        if x in self._frozen_config.myopts:
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    world_set = root_config.sets["selected"]

    world_locked = False
    if hasattr(world_set, "lock"):

    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk

    args_set = self._dynamic_config.sets[
        self._frozen_config.target_root].sets['__non_set_args__']
    portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
    added_favorites = set()
    for x in self._dynamic_config._set_nodes:
        if x.operation != "nomerge":
        if x.root != root_config.root:
            myfavkey = create_world_atom(x, args_set, root_config)
            if myfavkey in added_favorites:
            added_favorites.add(myfavkey)
        except portage.exception.InvalidDependString as e:
            writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                (x.cpv, e), noiselevel=-1)
            writemsg("!!! see '%s'\n\n" % os.path.join(
                x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
    for arg in self._dynamic_config._initial_arg_list:
        if not isinstance(arg, SetArg):
        if arg.root_config.root != root_config.root:
        if k in ("selected", "world") or \
            not root_config.sets[k].world_candidate:
        all_added.append(SETPREFIX + k)
    all_added.extend(added_favorites)
        writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
            colorize("INFORM", str(a)), noiselevel=-1)
    world_set.update(all_added)
def _loadResumeCommand(self, resume_data, skip_masked=True,
    """
    Add a resume command to the graph and validate it in the process. This
    will raise a PackageNotFound exception if a package is not available.
    """
    # NOTE(review): the remainder of the signature and many statements
    # (try/except framing, loop headers, early returns) are elided in this
    # excerpt; original line numbering jumps accordingly.
    if not isinstance(resume_data, dict):
    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
    favorites = resume_data.get("favorites")
    args_set = self._dynamic_config.sets[
        self._frozen_config.target_root].sets['__non_set_args__']
    if isinstance(favorites, list):
        args = self._load_favorites(favorites)
    fakedb = self._dynamic_config.mydbapi
    trees = self._frozen_config.trees
    serialized_tasks = []
    if not (isinstance(x, list) and len(x) == 4):
    pkg_type, myroot, pkg_key, action = x
    if pkg_type not in self.pkg_tree_map:
    if action != "merge":
    root_config = self._frozen_config.roots[myroot]

    # Use the resume "favorites" list to see if a repo was specified
    # for this package.
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    for atom in depgraph_sets.atoms.getAtoms():
        if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
    atom = "=" + pkg_key
    atom = atom + _repo_separator + repo
    atom = Atom(atom, allow_repo=True)
    for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
        if not self._pkg_visibility_check(pkg) or \
            self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
    # It does not exist or it is corrupt.
    # TODO: log these somewhere
    raise portage.exception.PackageNotFound(pkg_key)
    if "merge" == pkg.operation and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
        modified_use=self._pkg_use_enabled(pkg)):
    if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
        masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
        self._dynamic_config._unsatisfied_deps_for_display.append(
            ((pkg.root, "="+pkg.cpv), {"myparent":None}))
    fakedb[myroot].cpv_inject(pkg)
    serialized_tasks.append(pkg)
    self._spinner_update()

    if self._dynamic_config._unsatisfied_deps_for_display:
    if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
        self._dynamic_config._serialized_tasks_cache = serialized_tasks
        self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
    self._select_package = self._select_pkg_from_graph
    self._dynamic_config.myparams["selective"] = True
    # Always traverse deep dependencies in order to account for
    # potentially unsatisfied dependencies of installed packages.
    # This is necessary for correct --keep-going or --resume operation
    # in case a package from a group of circularly dependent packages
    # fails. In this case, a package which has recently been installed
    # may have an unsatisfied circular dependency (pulled in by
    # PDEPEND, for example). So, even though a package is already
    # installed, it may not have all of its dependencies satisfied, so
    # it may not be usable. If such a package is in the subgraph of
    # deep dependencies of a scheduled build, that build needs to
    # be cancelled. In order for this type of situation to be
    # recognized, deep traversal of dependencies is required.
    self._dynamic_config.myparams["deep"] = True

    for task in serialized_tasks:
        if isinstance(task, Package) and \
            task.operation == "merge":
            if not self._add_pkg(task, None):

    # Packages for argument atoms need to be explicitly
    # added via _add_pkg() so that they are included in the
    # digraph (needed at least for --tree display).
    for arg in self._expand_set_args(args, add_to_digraph=True):
        for atom in arg.pset.getAtoms():
            pkg, existing_node = self._select_package(
                arg.root_config.root, atom)
            if existing_node is None and \
                if not self._add_pkg(pkg, Dependency(atom=atom,
                    root=pkg.root, parent=arg)):

    # Allow unsatisfied deps here to avoid showing a masking
    # message for an unsatisfied dep that isn't necessarily
    # masked.
    if not self._create_graph(allow_unsatisfied=True):

    unsatisfied_deps = []
    for dep in self._dynamic_config._unsatisfied_deps:
        if not isinstance(dep.parent, Package):
        if dep.parent.operation == "merge":
            unsatisfied_deps.append(dep)

        # For unsatisfied deps of installed packages, only account for
        # them if they are in the subgraph of dependencies of a package
        # which is scheduled to be installed.
        unsatisfied_install = False
        dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
        node = dep_stack.pop()
        if not isinstance(node, Package):
        if node.operation == "merge":
            unsatisfied_install = True
        if node in traversed:
        dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

        if unsatisfied_install:
            unsatisfied_deps.append(dep)

    if masked_tasks or unsatisfied_deps:
        # This probably means that a required package
        # was dropped via --skipfirst. It makes the
        # resume list invalid, so convert it to a
        # UnsatisfiedResumeDep exception.
        raise self.UnsatisfiedResumeDep(self,
            masked_tasks + unsatisfied_deps)
    self._dynamic_config._serialized_tasks_cache = None
    except self._unknown_internal_error:
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.
    """
    # NOTE(review): loop header, continue statements and some assignments
    # are elided in this excerpt; original line numbering jumps accordingly.
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    if not isinstance(x, basestring):
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        # Strip the "@" set prefix to get the bare set name.
        s = x[len(SETPREFIX):]
        if s in depgraph_sets.sets:
        depgraph_sets.sets[s] = pset
        args.append(SetArg(arg=x, pset=pset,
            root_config=root_config))
        x = Atom(x, allow_repo=True)
        except portage.exception.InvalidAtom:
        args.append(AtomArg(arg=x, atom=x,
            root_config=root_config))

    self._set_args(args)
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        portage.exception.PortageException.__init__(self, value)
        # Keep a reference to the depgraph so callers can inspect the
        # graph state that produced this failure.
        self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
    """Base class for exceptions raised and handled internally by the
    depgraph; never intended to escape to callers."""
    def __init__(self, value=""):
        # PortageException is imported at module level; same class as
        # portage.exception.PortageException.
        PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr, unfortunately, the exact reason for the failure
    isn't known.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
class _backtrack_mask(_internal_exception):
    """
    This is raised by _show_unsatisfied_dep() when it's called with
    check_backtrack=True and a matching package has been masked by
    backtracking.
    """
def need_restart(self):
    """Return a true value when the dependency calculation has requested
    a restart (backtracking) and the restart is not being skipped."""
    dynamic_config = self._dynamic_config
    if not dynamic_config._need_restart:
        # Preserve the exact falsy value, as "a and b" would.
        return dynamic_config._need_restart
    return not dynamic_config._skip_restart
def success_without_autounmask(self):
    """Return the flag indicating that the dependency resolution would
    have succeeded without any autounmask changes."""
    dynamic_config = self._dynamic_config
    return dynamic_config._success_without_autounmask
def get_backtrack_infos(self):
    """Return the accumulated backtracking information collected during
    this dependency calculation."""
    dynamic_config = self._dynamic_config
    return dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    via dep_check().
    """
    # NOTE(review): several original lines (assignments, loop headers,
    # return statements) are elided in this excerpt; original line
    # numbering jumps accordingly.

    def __init__(self, depgraph, root):
        dbapi.__init__(self)
        self._depgraph = depgraph
        # Memoized match() results and cpv -> Package mapping used by
        # aux_get()/match_pkgs().
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Drop memoized match() results and the cpv -> Package map.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def cp_list(self, cp):
        """
        Emulate cp_list just so it can be used to check for existence
        of new-style virtuals. Since it's a waste of time to return
        more than one cpv for this use case, a maximum of one cpv is
        needed.
        """
        if isinstance(cp, Atom):
        for pkg in self._depgraph._iter_match_pkgs_any(
            self._depgraph._frozen_config.roots[self._root], atom):

    def match(self, atom):
        """Return matching cpvs for atom, caching results per atom."""
        ret = self._match_cache.get(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
        slots.remove(pkg.metadata["SLOT"])
        slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
        self._cpv_pkg_map[pkg.cpv] = pkg
        self._cpv_sort_ascending(ret)
        self._match_cache[atom] = ret

    def _visible(self, pkg):
        """Return whether pkg should be visible through this view."""
        if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
            arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
            except (StopIteration, portage.exception.InvalidDependString):
        if pkg.installed and \
            (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
            # Account for packages with masks (like KEYWORDS masks)
            # that are usually ignored in visibility checks for
            # installed packages, in order to handle cases like
            # the one described above (remainder of comment elided).
            myopts = self._depgraph._frozen_config.myopts
            use_ebuild_visibility = myopts.get(
                '--use-ebuild-visibility', 'n') != 'n'
            avoid_update = "--update" not in myopts and \
                "remove" not in self._depgraph._dynamic_config.myparams
            usepkgonly = "--usepkgonly" in myopts
            if not avoid_update:
                if not use_ebuild_visibility and usepkgonly:
                    pkg_eb = self._depgraph._pkg(
                        pkg.cpv, "ebuild", pkg.root_config,
                    except portage.exception.PackageNotFound:
                        pkg_eb_visible = False
                        for pkg_eb in self._depgraph._iter_match_pkgs(
                            pkg.root_config, "ebuild",
                            Atom("=%s" % (pkg.cpv,))):
                            if self._depgraph._pkg_visibility_check(pkg_eb):
                                pkg_eb_visible = True
                        if not pkg_eb_visible:
                        if not self._depgraph._pkg_visibility_check(pkg_eb):

        in_graph = self._depgraph._dynamic_config._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            # conflicts).
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            # Note: highest_visible is not necessarily the real highest
            # visible, especially when --update is not enabled, so use
            # < operator instead of !=.
            if pkg < highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def aux_get(self, cpv, wants):
        # Serve metadata from the Package instance captured by match().
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]

    def match_pkgs(self, atom):
        # Like match(), but return Package instances instead of cpv strings.
        return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """
    Report that a short ebuild name matched multiple packages, listing the
    fully-qualified alternatives (plain list with --quiet, search output
    otherwise).
    """
    # NOTE(review): some original lines (early return and search-output
    # statements) are elided in this excerpt.
    if "--quiet" in myopts:
        writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
        writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)

    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
    writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def insert_category_into_atom(atom, category):
    """
    Return atom with "category/" inserted just before its first word
    character (e.g. before the package name, after any version operator).
    NOTE(review): the guard and the no-match branch are elided in this
    excerpt.
    """
    alphanum = re.search(r'\w', atom)
    ret = atom[:alphanum.start()] + "%s/" % category + \
        atom[alphanum.start():]
def _spinner_start(spinner, myopts):
    """
    Print the "These are the packages that would be ..." header appropriate
    for the given options and start (or silence) the spinner.
    """
    # NOTE(review): several original lines (early return, the assignments
    # that choose the "action" wording) are elided in this excerpt.
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
    if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
    elif "--buildpkgonly" in myopts:
    if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
        if "--unordered-display" in myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be %s:" % action) + "\n\n")
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be %s, in reverse order:" % action) + "\n\n")
        portage.writemsg_stdout("\n" + \
            darkgreen("These are the packages that " + \
            "would be %s, in order:" % action) + "\n\n")

    # Quiet/nodeps runs do not animate the spinner.
    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet
    portage.writemsg_stdout("Calculating dependencies ")
def _spinner_stop(spinner):
    """Erase the spinner residue (tty only) and print completion.
    NOTE(review): the early "return" of the first branch is elided in
    this excerpt."""
    if spinner is None or \
        spinner.update == spinner.update_quiet:
    if spinner.update != spinner.update_basic:
        # update_basic is used for non-tty output,
        # so don't output backspaces in that case.
        portage.writemsg_stdout("\b\b")
    portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
    myaction, myfiles, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.
    """
    # NOTE(review): the try/finally framing is elided in this excerpt; as
    # shown, the _spinner_stop() call after "return" would be unreachable.
    _spinner_start(spinner, myopts)
    return _backtrack_depgraph(settings, trees, myopts, myparams,
        myaction, myfiles, spinner)
    _spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
    """
    Build a depgraph, feeding backtracking parameters back into new depgraph
    instances until success, the retry limit, or no restart is requested;
    on exhaustion, fall back to the best run without backtracking.
    NOTE(review): loop framing and several branch bodies are elided in this
    excerpt; original line numbering jumps accordingly.
    """
    max_retries = myopts.get('--backtrack', 10)
    # NOTE(review): on Python 3 "/" here yields a float; integer division
    # ("//") appears intended for a depth value — verify.
    max_depth = max(1, (max_retries + 1) / 2)
    allow_backtracking = max_retries > 0
    backtracker = Backtracker(max_depth)
    frozen_config = _frozen_depgraph_config(settings, trees,
    backtrack_parameters = backtracker.get()

    mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
        frozen_config=frozen_config,
        allow_backtracking=allow_backtracking,
        backtrack_parameters=backtrack_parameters)
    success, favorites = mydepgraph.select_files(myfiles)

    if success or mydepgraph.success_without_autounmask():
    elif not allow_backtracking:
    elif backtracked >= max_retries:
    elif mydepgraph.need_restart():
        backtracker.feedback(mydepgraph.get_backtrack_infos())

    if not (success or mydepgraph.success_without_autounmask()) and backtracked:

        if "--debug" in myopts:
            "\n\nbacktracking aborted after %s tries\n\n" % \
            backtracked, noiselevel=-1, level=logging.DEBUG)

        # Final attempt: replay the best run with backtracking disabled.
        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
            frozen_config=frozen_config,
            allow_backtracking=False,
            backtrack_parameters=backtracker.get_best_run())
        success, favorites = mydepgraph.select_files(myfiles)

    return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Raises PackageSetNotFound if myfiles contains a missing package set.
    """
    # NOTE(review): the try/finally framing and the tail of the call are
    # elided in this excerpt.
    _spinner_start(spinner, myopts)
    return _resume_depgraph(settings, trees, mtimedb, myopts,
    _spinner_stop(spinner)
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
    TODO: Return reasons for dropped_tasks, for display/logging.
    @returns: (success, depgraph, dropped_tasks)
    """
    # NOTE(review): retry-loop framing, "try:"/"raise" lines and some
    # assignments are elided in this excerpt; original line numbering
    # jumps accordingly.
    skip_unsatisfied = True
    mergelist = mtimedb["resume"]["mergelist"]
    dropped_tasks = set()
    frozen_config = _frozen_depgraph_config(settings, trees,
    mydepgraph = depgraph(settings, trees,
        myopts, myparams, spinner, frozen_config=frozen_config)
        success = mydepgraph._loadResumeCommand(mtimedb["resume"],
            skip_masked=skip_masked)
    except depgraph.UnsatisfiedResumeDep as e:
        if not skip_unsatisfied:

        graph = mydepgraph._dynamic_config.digraph
        unsatisfied_parents = dict((dep.parent, dep.parent) \
        traversed_nodes = set()
        unsatisfied_stack = list(unsatisfied_parents)
        while unsatisfied_stack:
            pkg = unsatisfied_stack.pop()
            if pkg in traversed_nodes:
            traversed_nodes.add(pkg)

            # If this package was pulled in by a parent
            # package scheduled for merge, removing this
            # package may cause the parent package's
            # dependency to become unsatisfied.
            for parent_node in graph.parent_nodes(pkg):
                if not isinstance(parent_node, Package) \
                    or parent_node.operation not in ("merge", "nomerge"):
                graph.child_nodes(parent_node,
                    ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
                if pkg in unsatisfied:
                    unsatisfied_parents[parent_node] = parent_node
                    unsatisfied_stack.append(parent_node)

        pruned_mergelist = []
        if isinstance(x, list) and \
            tuple(x) not in unsatisfied_parents:
            pruned_mergelist.append(x)

        # If the mergelist doesn't shrink then this loop is infinite.
        if len(pruned_mergelist) == len(mergelist):
            # This happens if a package can't be dropped because
            # it's already installed, but it has unsatisfied PDEPEND.
        mergelist[:] = pruned_mergelist

        # Exclude installed packages that have been removed from the graph due
        # to failure to build/install runtime dependencies after the dependent
        # package has already been installed.
        dropped_tasks.update(pkg for pkg in \
            unsatisfied_parents if pkg.operation != "nomerge")

        # Release references before the next retry iteration.
        del e, graph, traversed_nodes, \
            unsatisfied_parents, unsatisfied_stack

    return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
    """
    Fetch metadata for cpv from db and compute its masking reasons.
    Returns (metadata, mreasons); metadata is None-checked because the
    aux_get call can fail (its try/except framing is elided here).
    """
    metadata = dict(zip(db_keys,
        db.aux_get(cpv, db_keys, myrepo=myrepo)))
    if metadata is None:
        mreasons = ["corruption"]
    eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        mreasons = ['EAPI %s' % eapi]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)

    if _pkg_use_enabled is not None:
        modified_use = _pkg_use_enabled(pkg)

    mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)

    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package with its masking reasons, any package.mask
    comment, and license-file locations. Returns have_eapi_mask, True when
    at least one entry was masked by an unsupported EAPI.
    """
    # NOTE(review): a few original lines (shown_cpvs initialization, some
    # continue/conditional lines) are elided in this excerpt.
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv, repo,
        metadata, mreasons) in masked_packages:
        output_cpv += _repo_separator + repo
        if output_cpv in shown_cpvs:
        shown_cpvs.add(output_cpv)
        comment, filename = None, None
        if "package.mask" in mreasons:
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
            missing_licenses = \
                pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.

        writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)

        if comment and comment not in shown_comments:
            writemsg_stdout(filename + ":\n" + comment + "\n",
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.\n\n") % (l, l_path)
            writemsg_stdout(msg, noiselevel=-1)
            shown_licenses.add(l)
    return have_eapi_mask
def show_mask_docs():
    """Print pointers to the documentation about masked packages."""
    for line in (
        "For more information, see the MASKED PACKAGES section in the emerge\n",
        "man page or refer to the Gentoo Handbook.\n",
    ):
        writemsg_stdout(line, noiselevel=-1)
def show_blocker_docs_link():
    """Print a pointer to the handbook section about blocked packages."""
    messages = (
        "\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
        "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
        "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
    )
    for message in messages:
        writemsg(message, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
    """Return the list of human-readable masking reason messages for pkg,
    derived from the _MaskReason objects of _get_masking_status()."""
    reasons = _get_masking_status(pkg, pkgsettings, root_config,
        myrepo=myrepo, use=use)
    return [reason.message for reason in reasons]
6773 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
6774 mreasons = _getmaskingstatus(
6775 pkg, settings=pkgsettings,
6776 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
6778 if not pkg.installed:
6779 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
6780 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
6781 pkg.metadata["CHOST"]))
6784 for msg_type, msgs in pkg.invalid.items():
6787 _MaskReason("invalid", "invalid: %s" % (msg,)))
6789 if not pkg.metadata["SLOT"]:
6791 _MaskReason("invalid", "SLOT: undefined"))