1 # Copyright 1999-2012 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
13 from collections import deque
14 from itertools import chain
17 from portage import os, OrderedDict
18 from portage import _unicode_decode, _unicode_encode, _encodings
19 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
20 from portage.dbapi import dbapi
21 from portage.dbapi.dep_expand import dep_expand
22 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
23 check_required_use, human_readable_required_use, match_from_list, \
25 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
26 from portage.exception import InvalidAtom, InvalidDependString, PortageException
27 from portage.output import colorize, create_color_func, \
29 bad = create_color_func("BAD")
30 from portage.package.ebuild.getmaskingstatus import \
31 _getmaskingstatus, _MaskReason
32 from portage._sets import SETPREFIX
33 from portage._sets.base import InternalPackageSet
34 from portage.util import ConfigProtect, shlex_split, new_protect_filename
35 from portage.util import cmp_sort_key, writemsg, writemsg_stdout
36 from portage.util import ensure_dirs
37 from portage.util import writemsg_level, write_atomic
38 from portage.util.digraph import digraph
39 from portage.util.listdir import _ignorecvs_dirs
40 from portage.versions import catpkgsplit
42 from _emerge.AtomArg import AtomArg
43 from _emerge.Blocker import Blocker
44 from _emerge.BlockerCache import BlockerCache
45 from _emerge.BlockerDepPriority import BlockerDepPriority
46 from _emerge.countdown import countdown
47 from _emerge.create_world_atom import create_world_atom
48 from _emerge.Dependency import Dependency
49 from _emerge.DependencyArg import DependencyArg
50 from _emerge.DepPriority import DepPriority
51 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
52 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
53 from _emerge.FakeVartree import FakeVartree
54 from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
55 from _emerge.is_valid_package_atom import insert_category_into_atom, \
57 from _emerge.Package import Package
58 from _emerge.PackageArg import PackageArg
59 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
60 from _emerge.RootConfig import RootConfig
61 from _emerge.search import search
62 from _emerge.SetArg import SetArg
63 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
64 from _emerge.UnmergeDepPriority import UnmergeDepPriority
65 from _emerge.UseFlagDisplay import pkg_use_display
66 from _emerge.userquery import userquery
68 from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
69 from _emerge.resolver.slot_collision import slot_conflict_handler
70 from _emerge.resolver.circular_dependency import circular_dependency_handler
71 from _emerge.resolver.output import Display
73 if sys.hexversion >= 0x3000000:
# NOTE(review): SOURCE is a line-numbered listing (leading integers are the
# original listing numbers) with many lines elided — the numbering is
# non-contiguous. Code is kept byte-identical below; only comments are added.
# Container bundling the results of dependency resolution for hand-off to
# the scheduler: trees, the shared Package cache, the dep graph and the
# ordered merge list.
77 class _scheduler_graph_config(object):
78 def __init__(self, trees, pkg_cache, graph, mergelist):
# Only the pkg_cache and mergelist assignments are visible here; the
# trees/graph assignments (listing lines 79 and 81) are elided — confirm
# against the full source.
80 self.pkg_cache = pkg_cache
82 self.mergelist = mergelist
# Build an InternalPackageSet (wildcard atoms allowed, repo atoms not) from
# an iterable of atom strings. An atom string that raises InvalidAtom is
# retried with a "*/" category prefix so bare package names are accepted.
# NOTE(review): the loop header / try line (listing 86-87) and the
# pkgs.add(...) / return lines (listing 91-93) are elided from this view.
84 def _wildcard_set(atoms):
85 pkgs = InternalPackageSet(allow_wildcard=True)
88 x = Atom(x, allow_wildcard=True, allow_repo=False)
89 except portage.exception.InvalidAtom:
# Fallback: treat the input as a bare package name in any category.
90 x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
# Configuration shared by (and immutable across) backtracking depgraph
# instances: per-root FakeVartree wrappers, cloned pkgsettings, wildcard
# package sets derived from command-line options, and --rebuild-if-* flags.
94 class _frozen_depgraph_config(object):
96 def __init__(self, settings, trees, myopts, spinner):
97 self.settings = settings
98 self.target_root = settings["EROOT"]
101 if settings.get("PORTAGE_DEBUG", "") == "1":
103 self.spinner = spinner
104 self._running_root = trees[trees._running_eroot]["root_config"]
105 self.pkgsettings = {}
# Keep a reference to the caller's original trees; self.trees (built
# below) substitutes FakeVartree instances for the real vartrees.
107 self._trees_orig = trees
109 # All Package instances
111 self._highest_license_masked = {}
# --dynamic-deps defaults to enabled ("y") unless explicitly "n".
112 dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
# NOTE(review): the per-root loop header (presumably "for myroot in
# trees:", listing line 113) is elided; everything through listing 129
# appears to be that loop's body.
114 self.trees[myroot] = {}
115 # Create a RootConfig instance that references
116 # the FakeVartree instead of the real one.
117 self.roots[myroot] = RootConfig(
118 trees[myroot]["vartree"].settings,
120 trees[myroot]["root_config"].setconfig)
121 for tree in ("porttree", "bintree"):
122 self.trees[myroot][tree] = trees[myroot][tree]
123 self.trees[myroot]["vartree"] = \
124 FakeVartree(trees[myroot]["root_config"],
125 pkg_cache=self._pkg_cache,
126 pkg_root_config=self.roots[myroot],
127 dynamic_deps=dynamic_deps)
# Clone settings from the FakeVartree so virtuals etc. stay in sync.
128 self.pkgsettings[myroot] = portage.config(
129 clone=self.trees[myroot]["vartree"].settings)
131 self._required_set_names = set(["world"])
# Each of the following options holds a list of whitespace-separated
# atom strings; join+split flattens them before building wildcard sets.
133 atoms = ' '.join(myopts.get("--exclude", [])).split()
134 self.excluded_pkgs = _wildcard_set(atoms)
135 atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
136 self.reinstall_atoms = _wildcard_set(atoms)
137 atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
138 self.usepkg_exclude = _wildcard_set(atoms)
139 atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
140 self.useoldpkg_atoms = _wildcard_set(atoms)
141 atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
142 self.rebuild_exclude = _wildcard_set(atoms)
143 atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
144 self.rebuild_ignore = _wildcard_set(atoms)
# Boolean rebuild triggers taken straight from the option dict.
146 self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
147 self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
148 self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
# Per-root bookkeeping for package sets and argument atoms pulled into the
# graph. NOTE(review): the "def __init__" line (listing 151) and the
# initialization of self.sets (listing 153) are elided from this view.
150 class _depgraph_sets(object):
152 # contains all sets added to the graph
154 # contains non-set atoms given as arguments
155 self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
156 # contains all atoms from all sets added to the graph, including
157 # atoms given as arguments
158 self.atoms = InternalPackageSet(allow_repo=True)
# Maps atoms back to the DependencyArg instances they came from.
159 self.atom_arg_map = {}
# Tracks which packages must be rebuilt/reinstalled to honor the
# --rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
# options, using a digraph of buildtime dependencies between built parents
# and their dep packages.
161 class _rebuild_config(object):
162 def __init__(self, frozen_config, backtrack_parameters):
163 self._graph = digraph()
164 self._frozen_config = frozen_config
# Copies so that this depgraph run can extend the lists without
# mutating the shared backtrack parameters.
165 self.rebuild_list = backtrack_parameters.rebuild_list.copy()
166 self.orig_rebuild_list = self.rebuild_list.copy()
167 self.reinstall_list = backtrack_parameters.reinstall_list.copy()
168 self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
169 self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
170 self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
# True when any rebuild trigger is active; gates all work in add().
171 self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
172 self.rebuild_if_unbuilt)
# Record a buildtime edge dep_pkg -> parent when rebuild tracking applies:
# parent must be a built Package, the dep priority buildtime, and neither
# end excluded/ignored via --rebuild-exclude / --rebuild-ignore.
174 def add(self, dep_pkg, dep):
175 parent = dep.collapsed_parent
176 priority = dep.collapsed_priority
177 rebuild_exclude = self._frozen_config.rebuild_exclude
178 rebuild_ignore = self._frozen_config.rebuild_ignore
179 if (self.rebuild and isinstance(parent, Package) and
180 parent.built and priority.buildtime and
181 isinstance(dep_pkg, Package) and
182 not rebuild_exclude.findAtomForPackage(parent) and
183 not rebuild_ignore.findAtomForPackage(dep_pkg)):
184 self._graph.add(dep_pkg, parent, priority)
186 def _needs_rebuild(self, dep_pkg):
187 """Check whether packages that depend on dep_pkg need to be rebuilt."""
188 dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
# NOTE(review): the return statements after several of the conditions
# below (e.g. listing 190-191, 195-196, 211-213) are elided, so the
# exact True/False results cannot be confirmed from this view.
189 if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
192 if self.rebuild_if_unbuilt:
193 # dep_pkg is being installed from source, so binary
194 # packages for parents are invalid. Force rebuild
197 trees = self._frozen_config.trees
198 vardb = trees[dep_pkg.root]["vartree"].dbapi
199 if self.rebuild_if_new_rev:
200 # Parent packages are valid if a package with the same
201 # cpv is already installed.
202 return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
204 # Otherwise, parent packages are valid if a package with the same
205 # version (excluding revision) is already installed.
206 assert self.rebuild_if_new_ver
# catpkgsplit(...)[:-1] drops the revision component for comparison.
207 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
208 for inst_cpv in vardb.match(dep_pkg.slot_atom):
209 inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
210 if inst_cpv_norev == cpv_norev:
# Decide, for one parent and its buildtime deps, whether the parent goes
# on rebuild_list or reinstall_list.
215 def _trigger_rebuild(self, parent, build_deps):
216 root_slot = (parent.root, parent.slot_atom)
217 if root_slot in self.rebuild_list:
219 trees = self._frozen_config.trees
221 for slot_atom, dep_pkg in build_deps.items():
222 dep_root_slot = (dep_pkg.root, slot_atom)
223 if self._needs_rebuild(dep_pkg):
224 self.rebuild_list.add(root_slot)
226 elif ("--usepkg" in self._frozen_config.myopts and
227 (dep_root_slot in self.reinstall_list or
228 dep_root_slot in self.rebuild_list or
229 not dep_pkg.installed)):
231 # A direct rebuild dependency is being installed. We
232 # should update the parent as well to the latest binary,
233 # if that binary is valid.
235 # To validate the binary, we check whether all of the
236 # rebuild dependencies are present on the same binhost.
238 # 1) If parent is present on the binhost, but one of its
239 # rebuild dependencies is not, then the parent should
240 # be rebuilt from source.
241 # 2) Otherwise, the parent binary is assumed to be valid,
242 # because all of its rebuild dependencies are
244 bintree = trees[parent.root]["bintree"]
245 uri = bintree.get_pkgindex_uri(parent.cpv)
246 dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
247 bindb = bintree.dbapi
248 if self.rebuild_if_new_ver and uri and uri != dep_uri:
# With --rebuild-if-new-ver, any same-version (ignoring
# revision) binary on the same binhost also validates dep_uri.
249 cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
250 for cpv in bindb.match(dep_pkg.slot_atom):
251 if cpv_norev == catpkgsplit(cpv)[:-1]:
252 dep_uri = bintree.get_pkgindex_uri(cpv)
255 if uri and uri != dep_uri:
256 # 1) Remote binary package is invalid because it was
257 # built without dep_pkg. Force rebuild.
258 self.rebuild_list.add(root_slot)
260 elif (parent.installed and
261 root_slot not in self.reinstall_list):
262 inst_build_time = parent.metadata.get("BUILD_TIME")
# Trailing comma unpacks the single-element aux_get result.
264 bin_build_time, = bindb.aux_get(parent.cpv,
268 if bin_build_time != inst_build_time:
269 # 2) Remote binary package is valid, and local package
270 # is not up to date. Force reinstall.
273 self.reinstall_list.add(root_slot)
276 def trigger_rebuilds(self):
278 Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
279 depends on pkgA at both build-time and run-time, pkgB needs to be
# NOTE(review): the rest of the docstring and the graph-copy setup
# (listing 280-285) are elided from this view.
286 leaf_nodes = deque(graph.leaf_nodes())
288 # Trigger rebuilds bottom-up (starting with the leaves) so that parents
289 # will always know which children are being rebuilt.
292 # We'll have to drop an edge. This should be quite rare.
293 leaf_nodes.append(graph.order[-1])
295 node = leaf_nodes.popleft()
296 if node not in graph:
297 # This can be triggered by circular dependencies.
299 slot_atom = node.slot_atom
301 # Remove our leaf node from the graph, keeping track of deps.
302 parents = graph.parent_nodes(node)
304 node_build_deps = build_deps.get(node, {})
305 for parent in parents:
307 # Ignore a direct cycle.
309 parent_bdeps = build_deps.setdefault(parent, {})
310 parent_bdeps[slot_atom] = node
# A parent with no remaining children becomes a new leaf.
311 if not graph.child_nodes(parent):
312 leaf_nodes.append(parent)
314 # Trigger rebuilds for our leaf node. Because all of our children
315 # have been processed, the build_deps will be completely filled in,
316 # and self.rebuild_list / self.reinstall_list will tell us whether
317 # any of our children need to be rebuilt or reinstalled.
318 if self._trigger_rebuild(node, node_build_deps):
# Per-backtracking-run mutable state of the depgraph: the digraph itself,
# per-root fake/filtered db views, blocker bookkeeping, slot-conflict data,
# autounmask/backtracking state, and the various display queues.
324 class _dynamic_depgraph_config(object):
326 def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
327 self.myparams = myparams.copy()
328 self._vdb_loaded = False
329 self._allow_backtracking = allow_backtracking
330 # Maps slot atom to package for each Package added to the graph.
331 self._slot_pkg_map = {}
332 # Maps nodes to the reasons they were selected for reinstallation.
333 self._reinstall_nodes = {}
335 # Contains a filtered view of preferred packages that are selected
336 # from available repositories.
337 self._filtered_trees = {}
338 # Contains installed packages and new packages that have been added
340 self._graph_trees = {}
341 # Caches visible packages returned from _select_package, for use in
342 # depgraph._iter_atoms_for_pkg() SLOT logic.
343 self._visible_pkgs = {}
344 #contains the args created by select_files
345 self._initial_arg_list = []
346 self.digraph = portage.digraph()
347 # manages sets added to the graph
349 # contains all nodes pulled in by self.sets
350 self._set_nodes = set()
351 # Contains only Blocker -> Uninstall edges
352 self._blocker_uninstalls = digraph()
353 # Contains only Package -> Blocker edges
354 self._blocker_parents = digraph()
355 # Contains only irrelevant Package -> Blocker edges
356 self._irrelevant_blockers = digraph()
357 # Contains only unsolvable Package -> Blocker edges
358 self._unsolvable_blockers = digraph()
359 # Contains all Blocker -> Blocked Package edges
360 self._blocked_pkgs = digraph()
361 # Contains world packages that have been protected from
362 # uninstallation but may not have been added to the graph
363 # if the graph is not complete yet.
364 self._blocked_world_pkgs = {}
365 # Contains packages whose dependencies have been traversed.
366 # This use used to check if we have accounted for blockers
367 # relevant to a package.
368 self._traversed_pkg_deps = set()
369 self._slot_collision_info = {}
370 # Slot collision nodes are not allowed to block other packages since
371 # blocker validation is only able to account for one package per slot.
372 self._slot_collision_nodes = set()
373 self._parent_atoms = {}
374 self._slot_conflict_parent_atoms = set()
375 self._slot_conflict_handler = None
376 self._circular_dependency_handler = None
377 self._serialized_tasks_cache = None
378 self._scheduler_graph = None
379 self._displayed_list = None
380 self._pprovided_args = []
381 self._missing_args = []
382 self._masked_installed = set()
383 self._masked_license_updates = set()
384 self._unsatisfied_deps_for_display = []
385 self._unsatisfied_blockers_for_display = None
386 self._circular_deps_for_display = None
388 self._dep_disjunctive_stack = []
389 self._unsatisfied_deps = []
390 self._initially_unsatisfied_deps = []
391 self._ignored_deps = []
392 self._highest_pkg_cache = {}
394 # Binary packages that have been rejected because their USE
395 # didn't match the user's config. It maps packages to a set
396 # of flags causing the rejection.
397 self.ignored_binaries = {}
# Carry forward autounmask/backtracking decisions from previous runs.
399 self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
400 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
401 self._needed_license_changes = backtrack_parameters.needed_license_changes
402 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
403 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
404 self._need_restart = False
405 # For conditions that always require user intervention, such as
406 # unsatisfied REQUIRED_USE (currently has no autounmask support).
407 self._skip_restart = False
408 self._backtrack_infos = {}
# --autounmask defaults to enabled unless explicitly "n".
410 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
411 self._success_without_autounmask = False
412 self._traverse_ignored_deps = False
# Per-root setup of the fake/graph/filtered database views.
414 for myroot in depgraph._frozen_config.trees:
415 self.sets[myroot] = _depgraph_sets()
416 self._slot_pkg_map[myroot] = {}
417 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
418 # This dbapi instance will model the state that the vdb will
419 # have after new packages have been installed.
420 fakedb = PackageVirtualDbapi(vardb.settings)
422 self.mydbapi[myroot] = fakedb
# NOTE(review): the construction of graph_tree (listing 423-424)
# is elided from this view.
425 graph_tree.dbapi = fakedb
426 self._graph_trees[myroot] = {}
427 self._filtered_trees[myroot] = {}
428 # Substitute the graph tree for the vartree in dep_check() since we
429 # want atom selections to be consistent with package selections
430 # have already been made.
431 self._graph_trees[myroot]["porttree"] = graph_tree
432 self._graph_trees[myroot]["vartree"] = graph_tree
433 self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
434 self._graph_trees[myroot]["graph"] = self.digraph
# NOTE(review): the construction of filtered_tree (listing 435-436)
# is elided from this view.
437 filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
438 self._filtered_trees[myroot]["porttree"] = filtered_tree
439 self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
441 # Passing in graph_tree as the vartree here could lead to better
442 # atom selections in some cases by causing atoms for packages that
443 # have been added to the graph to be preferred over other choices.
444 # However, it can trigger atom selections that result in
445 # unresolvable direct circular dependencies. For example, this
446 # happens with gwydion-dylan which depends on either itself or
447 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
448 # gwydion-dylan-bin needs to be selected in order to avoid a
449 # an unresolvable direct circular dependency.
451 # To solve the problem described above, pass in "graph_db" so that
452 # packages that have been added to the graph are distinguishable
453 # from other available packages and installed packages. Also, pass
454 # the parent package into self._select_atoms() calls so that
455 # unresolvable direct circular dependencies can be detected and
456 # avoided when possible.
457 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
458 self._filtered_trees[myroot]["graph"] = self.digraph
459 self._filtered_trees[myroot]["vartree"] = \
460 depgraph._frozen_config.trees[myroot]["vartree"]
463 # (db, pkg_type, built, installed, db_keys)
464 if "remove" in self.myparams:
465 # For removal operations, use _dep_check_composite_db
466 # for availability and visibility checks. This provides
467 # consistency with install operations, so we don't
468 # get install/uninstall cycles like in bug #332719.
469 self._graph_trees[myroot]["porttree"] = filtered_tree
# Candidate db ordering: ebuilds, then binaries, then installed.
471 if "--usepkgonly" not in depgraph._frozen_config.myopts:
472 portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
473 db_keys = list(portdb._aux_cache_keys)
474 dbs.append((portdb, "ebuild", False, False, db_keys))
476 if "--usepkg" in depgraph._frozen_config.myopts:
477 bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
478 db_keys = list(bindb._aux_cache_keys)
479 dbs.append((bindb, "binary", True, False, db_keys))
481 vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# Use the original (non-fake) vartree's aux cache keys here.
482 db_keys = list(depgraph._frozen_config._trees_orig[myroot
483 ]["vartree"].dbapi._aux_cache_keys)
484 dbs.append((vardb, "installed", True, True, db_keys))
485 self._filtered_trees[myroot]["dbs"] = dbs
487 class depgraph(object):
489 pkg_tree_map = RootConfig.pkg_tree_map
491 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Wire together frozen (shared) and dynamic (per-run) config plus rebuild
# tracking, and select the default atom/package selection strategies.
# NOTE(review): a single BacktrackParameter() instance is shared across
# calls as the default argument — confirm it is treated as immutable.
493 def __init__(self, settings, trees, myopts, myparams, spinner,
494 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
495 if frozen_config is None:
# NOTE(review): the continuation with the remaining constructor
# arguments (listing 497) is elided from this view.
496 frozen_config = _frozen_depgraph_config(settings, trees,
498 self._frozen_config = frozen_config
499 self._dynamic_config = _dynamic_depgraph_config(self, myparams,
500 allow_backtracking, backtrack_parameters)
501 self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
# Strategy hooks: both default to the "highest available" selectors.
503 self._select_atoms = self._select_atoms_highest_available
504 self._select_package = self._select_pkg_highest_available
# NOTE(review): the enclosing "def" line and docstring delimiters for this
# method (presumably _load_vdb, listing ~506-507) are elided from this view;
# the text below is the docstring body followed by the method body.
508 Load installed package metadata if appropriate. This used to be called
509 from the constructor, but that wasn't very nice since this procedure
510 is slow and it generates spinner output. So, now it's called on-demand
511 by various methods when necessary.
# Idempotent: bail out once the vdb has been loaded for this config.
514 if self._dynamic_config._vdb_loaded:
517 for myroot in self._frozen_config.trees:
# dynamic_deps defaults to enabled ("y") unless explicitly "n".
519 dynamic_deps = self._dynamic_config.myparams.get(
520 "dynamic_deps", "y") != "n"
521 preload_installed_pkgs = \
522 "--nodeps" not in self._frozen_config.myopts
524 if self._frozen_config.myopts.get("--root-deps") is not None and \
525 myroot != self._frozen_config.target_root:
528 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
529 if not fake_vartree.dbapi:
530 # This needs to be called for the first depgraph, but not for
531 # backtracking depgraphs that share the same frozen_config.
534 # FakeVartree.sync() populates virtuals, and we want
535 # self.pkgsettings to have them populated too.
536 self._frozen_config.pkgsettings[myroot] = \
537 portage.config(clone=fake_vartree.settings)
539 if preload_installed_pkgs:
540 vardb = fake_vartree.dbapi
541 fakedb = self._dynamic_config._graph_trees[
542 myroot]["vartree"].dbapi
# NOTE(review): the loop header over installed packages
# (listing 543-544) is elided from this view.
545 self._spinner_update()
547 # This causes FakeVartree to update the
548 # Package instance dependencies via
549 # PackageVirtualDbapi.aux_update()
550 vardb.aux_get(pkg.cpv, [])
551 fakedb.cpv_inject(pkg)
553 self._dynamic_config._vdb_loaded = True
# Advance the progress spinner, if one was supplied to the frozen config.
555 def _spinner_update(self):
556 if self._frozen_config.spinner:
557 self._frozen_config.spinner.update()
# NOTE(review): the "def _show_ignored_binaries" line appears at listing 559
# and its docstring delimiters are partly elided; several early-return lines
# in the body are also elided from this view.
559 def _show_ignored_binaries(self):
561 Show binaries that have been ignored because their USE didn't
562 match the user's config.
# Skip the warning entirely when quiet, when there is nothing to show,
# or when binpkg_respect_use was set explicitly.
564 if not self._dynamic_config.ignored_binaries \
565 or '--quiet' in self._frozen_config.myopts \
566 or self._dynamic_config.myparams.get(
567 "binpkg_respect_use") in ("y", "n"):
# Prune entries that turned out to be irrelevant.
570 for pkg in list(self._dynamic_config.ignored_binaries):
572 selected_pkg = self._dynamic_config.mydbapi[pkg.root
573 ].match_pkgs(pkg.slot_atom)
578 selected_pkg = selected_pkg[-1]
# A strictly newer selection supersedes the ignored binary.
579 if selected_pkg > pkg:
580 self._dynamic_config.ignored_binaries.pop(pkg)
583 if selected_pkg.installed and \
584 selected_pkg.cpv == pkg.cpv and \
585 selected_pkg.metadata.get('BUILD_TIME') == \
586 pkg.metadata.get('BUILD_TIME'):
587 # We don't care about ignored binaries when an
588 # identical installed instance is selected to
590 self._dynamic_config.ignored_binaries.pop(pkg)
593 if not self._dynamic_config.ignored_binaries:
596 self._show_merge_list()
598 writemsg("\n!!! The following binary packages have been ignored " + \
599 "due to non matching USE:\n\n", noiselevel=-1)
601 for pkg, flags in self._dynamic_config.ignored_binaries.items():
602 writemsg(" =%s" % pkg.cpv, noiselevel=-1)
# Only mention the root when it is not the default "/".
603 if pkg.root_config.settings["ROOT"] != "/":
604 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
605 writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
610 "NOTE: The --binpkg-respect-use=n option will prevent emerge",
611 " from ignoring these binary packages if possible.",
612 " Using --binpkg-respect-use=y will silence this warning."
617 line = colorize("INFORM", line)
618 writemsg(line + "\n", noiselevel=-1)
# Collect updates that were masked at runtime (from _runtime_pkg_mask),
# keep only the highest missed update per (root, slot), then dispatch to
# the slot-conflict and missing-dependency display helpers.
620 def _show_missed_update(self):
622 # In order to minimize noise, show only the highest
623 # missed update from each SLOT.
625 for pkg, mask_reasons in \
626 self._dynamic_config._runtime_pkg_mask.items():
628 # Exclude installed here since we only
629 # want to show available updates.
631 chosen_pkg = self._dynamic_config.mydbapi[pkg.root
632 ].match_pkgs(pkg.slot_atom)
# Not a missed update if nothing or something >= pkg was chosen.
633 if not chosen_pkg or chosen_pkg[-1] >= pkg:
635 k = (pkg.root, pkg.slot_atom)
636 if k in missed_updates:
637 other_pkg, mask_type, parent_atoms = missed_updates[k]
640 for mask_type, parent_atoms in mask_reasons.items():
643 missed_updates[k] = (pkg, mask_type, parent_atoms)
646 if not missed_updates:
# Group by mask type for the two display styles below.
649 missed_update_types = {}
650 for pkg, mask_type, parent_atoms in missed_updates.values():
651 missed_update_types.setdefault(mask_type,
652 []).append((pkg, parent_atoms))
# In quiet (non-debug) mode these categories are suppressed.
654 if '--quiet' in self._frozen_config.myopts and \
655 '--debug' not in self._frozen_config.myopts:
656 missed_update_types.pop("slot conflict", None)
657 missed_update_types.pop("missing dependency", None)
659 self._show_missed_update_slot_conflicts(
660 missed_update_types.get("slot conflict"))
662 self._show_missed_update_unsatisfied_dep(
663 missed_update_types.get("missing dependency"))
# Display updates skipped because of unsatisfied dependencies; updates
# masked purely by backtracking are summarized in abbreviated form to
# avoid flooding the terminal (see bug #285832 note below).
665 def _show_missed_update_unsatisfied_dep(self, missed_updates):
667 if not missed_updates:
670 self._show_merge_list()
671 backtrack_masked = []
673 for pkg, parent_atoms in missed_updates:
# NOTE(review): the "try:" opening this block (listing ~674-675)
# is elided from this view; the except below pairs with it.
676 for parent, root, atom in parent_atoms:
677 self._show_unsatisfied_dep(root, atom, myparent=parent,
678 check_backtrack=True)
679 except self._backtrack_mask:
680 # This is displayed below in abbreviated form.
681 backtrack_masked.append((pkg, parent_atoms))
684 writemsg("\n!!! The following update has been skipped " + \
685 "due to unsatisfied dependencies:\n\n", noiselevel=-1)
687 writemsg(str(pkg.slot_atom), noiselevel=-1)
688 if pkg.root_config.settings["ROOT"] != "/":
689 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
690 writemsg("\n", noiselevel=-1)
692 for parent, root, atom in parent_atoms:
693 self._show_unsatisfied_dep(root, atom, myparent=parent)
694 writemsg("\n", noiselevel=-1)
697 # These are shown in abbreviated form, in order to avoid terminal
698 # flooding from mask messages as reported in bug #285832.
699 writemsg("\n!!! The following update(s) have been skipped " + \
700 "due to unsatisfied dependencies\n" + \
701 "!!! triggered by backtracking:\n\n", noiselevel=-1)
702 for pkg, parent_atoms in backtrack_masked:
703 writemsg(str(pkg.slot_atom), noiselevel=-1)
704 if pkg.root_config.settings["ROOT"] != "/":
705 writemsg(" for %s" % (pkg.root,), noiselevel=-1)
706 writemsg("\n", noiselevel=-1)
# Display updates that were skipped because of slot/dependency conflicts,
# listing each conflicting parent atom beneath the skipped slot atom.
708 def _show_missed_update_slot_conflicts(self, missed_updates):
710 if not missed_updates:
713 self._show_merge_list()
715 msg.append("\nWARNING: One or more updates have been " + \
716 "skipped due to a dependency conflict:\n\n")
719 for pkg, parent_atoms in missed_updates:
720 msg.append(str(pkg.slot_atom))
721 if pkg.root_config.settings["ROOT"] != "/":
722 msg.append(" for %s" % (pkg.root,))
725 for parent, atom in parent_atoms:
729 msg.append(" conflicts with\n")
731 if isinstance(parent,
732 (PackageArg, AtomArg)):
733 # For PackageArg and AtomArg types, it's
734 # redundant to display the atom attribute.
735 msg.append(str(parent))
737 # Display the specific atom from SetArg or
739 msg.append("%s required by %s" % (atom, parent))
743 writemsg("".join(msg), noiselevel=-1)
745 def _show_slot_collision_notice(self):
746 """Show an informational message advising the user to mask one of the
747 the packages. In some cases it may be possible to resolve this
748 automatically, but support for backtracking (removal nodes that have
749 already been selected) will be required in order to handle all possible
# NOTE(review): the docstring's closing line (listing ~750-752) is
# elided from this view.
753 if not self._dynamic_config._slot_collision_info:
756 self._show_merge_list()
# Delegate conflict formatting/explanation to the dedicated handler.
758 self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
759 handler = self._dynamic_config._slot_conflict_handler
761 conflict = handler.get_conflict()
762 writemsg(conflict, noiselevel=-1)
764 explanation = handler.get_explanation()
766 writemsg(explanation, noiselevel=-1)
769 if "--quiet" in self._frozen_config.myopts:
773 msg.append("It may be possible to solve this problem ")
774 msg.append("by using package.mask to prevent one of ")
775 msg.append("those packages from being selected. ")
776 msg.append("However, it is also possible that conflicting ")
777 msg.append("dependencies exist such that they are impossible to ")
778 msg.append("satisfy simultaneously. If such a conflict exists in ")
779 msg.append("the dependencies of two different packages, then those ")
780 msg.append("packages can not be installed simultaneously.")
# Suggest --backtrack=30 only when backtracking is off and the
# configured value is small.
781 backtrack_opt = self._frozen_config.myopts.get('--backtrack')
782 if not self._dynamic_config._allow_backtracking and \
783 (backtrack_opt is None or \
784 (backtrack_opt > 0 and backtrack_opt < 30)):
785 msg.append(" You may want to try a larger value of the ")
786 msg.append("--backtrack option, such as --backtrack=30, ")
787 msg.append("in order to see if that will solve this conflict ")
788 msg.append("automatically.")
# Wrap the advisory text to 70 columns for terminal output.
790 for line in textwrap.wrap(''.join(msg), 70):
791 writemsg(line + '\n', noiselevel=-1)
792 writemsg('\n', noiselevel=-1)
795 msg.append("For more information, see MASKED PACKAGES ")
796 msg.append("section in the emerge man page or refer ")
797 msg.append("to the Gentoo Handbook.")
798 for line in textwrap.wrap(''.join(msg), 70):
799 writemsg(line + '\n', noiselevel=-1)
800 writemsg('\n', noiselevel=-1)
802 def _process_slot_conflicts(self):
804 Process slot conflict data to identify specific atoms which
805 lead to conflict. These atoms only match a subset of the
806 packages that have been pulled into a given slot.
# For each conflicting slot: gather every parent atom of every package
# in the slot, then record which parent atoms match each package.
808 for (slot_atom, root), slot_nodes \
809 in self._dynamic_config._slot_collision_info.items():
811 all_parent_atoms = set()
812 for pkg in slot_nodes:
813 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
816 all_parent_atoms.update(parent_atoms)
818 for pkg in slot_nodes:
819 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
820 if parent_atoms is None:
822 self._dynamic_config._parent_atoms[pkg] = parent_atoms
823 for parent_atom in all_parent_atoms:
# Already known to match this package; skip re-checking.
824 if parent_atom in parent_atoms:
826 # Use package set for matching since it will match via
827 # PROVIDE when necessary, while match_from_list does not.
828 parent, atom = parent_atom
829 atom_set = InternalPackageSet(
830 initial_atoms=(atom,), allow_repo=True)
831 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
832 parent_atoms.add(parent_atom)
834 self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
836 def _reinstall_for_flags(self, pkg, forced_flags,
837 orig_use, orig_iuse, cur_use, cur_iuse):
838 """Return a set of flags that trigger reinstallation, or None if there
839 are no such flags."""
841 # binpkg_respect_use: Behave like newuse by default. If newuse is
842 # False and changed_use is True, then behave like changed_use.
843 binpkg_respect_use = (pkg.built and
844 self._dynamic_config.myparams.get("binpkg_respect_use")
846 newuse = "--newuse" in self._frozen_config.myopts
847 changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
# --newuse (or binpkg_respect_use without changed-use): compare both
# IUSE membership changes and enabled-flag changes, minus forced flags.
849 if newuse or (binpkg_respect_use and not changed_use):
850 flags = set(orig_iuse.symmetric_difference(
851 cur_iuse).difference(forced_flags))
852 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
853 cur_iuse.intersection(cur_use)))
# changed-use mode: only enabled-flag changes within IUSE matter.
# NOTE(review): the return statements for both branches (listing
# ~854-856, 860-862) are elided from this view.
857 elif changed_use or binpkg_respect_use:
858 flags = orig_iuse.intersection(orig_use).symmetric_difference(
859 cur_iuse.intersection(cur_use))
# Drain the dependency stacks, adding packages and their deps to the
# graph; disjunctive (|| ...) deps are processed from a separate stack.
864 def _create_graph(self, allow_unsatisfied=False):
865 dep_stack = self._dynamic_config._dep_stack
866 dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
867 while dep_stack or dep_disjunctive_stack:
868 self._spinner_update()
# NOTE(review): the "if dep_stack:" guard (listing ~869) is
# elided; dep_stack holds both Package and Dependency items.
870 dep = dep_stack.pop()
871 if isinstance(dep, Package):
872 if not self._add_pkg_deps(dep,
873 allow_unsatisfied=allow_unsatisfied):
876 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
878 if dep_disjunctive_stack:
879 if not self._pop_disjunction(allow_unsatisfied):
883 def _expand_set_args(self, input_args, add_to_digraph=False):
885 Iterate over a list of DependencyArg instances and yield all
886 instances given in the input together with additional SetArg
887 instances that are generated from nested sets.
888 @param input_args: An iterable of DependencyArg instances
889 @type input_args: Iterable
890 @param add_to_digraph: If True then add SetArg instances
891 to the digraph, in order to record parent -> child
892 relationships from nested sets
893 @type add_to_digraph: Boolean
895 @return: All args given in the input together with additional
896 SetArg instances that are generated from nested sets
# NOTE(review): the docstring terminator and the @rtype line
# (listing ~897-898) are elided from this view.
899 traversed_set_args = set()
901 for arg in input_args:
# Non-set args pass through; only SetArg instances are expanded.
902 if not isinstance(arg, SetArg):
906 root_config = arg.root_config
907 depgraph_sets = self._dynamic_config.sets[root_config.root]
# NOTE(review): the arg_stack initialization and the while-loop
# header (listing ~908-909) are elided from this view.
910 arg = arg_stack.pop()
911 if arg in traversed_set_args:
913 traversed_set_args.add(arg)
916 self._dynamic_config.digraph.add(arg, None,
917 priority=BlockerDepPriority.instance)
921 # Traverse nested sets and add them to the stack
922 # if they're not already in the graph. Also, graph
923 # edges between parent and nested sets.
924 for token in arg.pset.getNonAtoms():
925 if not token.startswith(SETPREFIX):
# Strip the set prefix to get the nested set's name.
927 s = token[len(SETPREFIX):]
928 nested_set = depgraph_sets.sets.get(s)
929 if nested_set is None:
930 nested_set = root_config.sets.get(s)
931 if nested_set is not None:
932 nested_arg = SetArg(arg=token, pset=nested_set,
933 root_config=root_config)
934 arg_stack.append(nested_arg)
936 self._dynamic_config.digraph.add(nested_arg, arg,
937 priority=BlockerDepPriority.instance)
938 depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Resolve a single queued Dependency: register a Blocker for blocker
    atoms, otherwise select a package satisfying dep.atom and hand it
    to _add_pkg, with backtracking bookkeeping on failure.
    """
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts

    # NOTE(review): upstream guards this branch with 'if dep.blocker:'
    # and a 'not nodeps' clause; those lines appear elided here.
    if not buildpkgonly and \
        not dep.collapsed_priority.ignored and \
        not dep.collapsed_priority.optional and \
        dep.parent not in self._dynamic_config._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.
            # NOTE(review): early return appears elided.

        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            priority=dep.priority, root=dep.parent.root)
        self._dynamic_config._blocker_parents.add(blocker, dep.parent)

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
        # NOTE(review): the 'else' branch header (caller-selected child)
        # appears elided before the next two lines.
        # The caller has selected a specific package
        # via self._minimize_packages().
        existing_node = self._dynamic_config._slot_pkg_map[
            dep.root].get(dep_pkg.slot_atom)

    # Unsatisfied-dependency handling below; NOTE(review): the guard
    # ('if not dep_pkg:') appears elided.
    if (dep.collapsed_priority.optional or
        dep.collapsed_priority.ignored):
        # This is an unnecessary build-time dep.
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)

    self._dynamic_config._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))

    # The parent node should not already be in
    # runtime_pkg_mask, since that would trigger an
    # infinite backtracking loop.
    if self._dynamic_config._allow_backtracking:
        if dep.parent in self._dynamic_config._runtime_pkg_mask:
            # NOTE(review): the surrounding writemsg(...) call lines
            # appear elided around this message.
            "!!! backtracking loop detected: %s %s\n" % \
            self._dynamic_config._runtime_pkg_mask[
                dep.parent]), noiselevel=-1)
        elif not self.need_restart():
            # Do not backtrack if only USE have to be changed in
            # order to satisfy the dependency.
            dep_pkg, existing_node = \
                self._select_package(dep.root, dep.atom.without_use,
                    onlydeps=dep.onlydeps)
            # Schedule a restart so the backtracker can mask the
            # parent and retry with different choices.
            self._dynamic_config._backtrack_infos["missing dependency"] = dep
            self._dynamic_config._need_restart = True
            # Debug trace of why we are backtracking.
            msg.append("backtracking due to unsatisfied dep:")
            msg.append(" parent: %s" % dep.parent)
            msg.append(" priority: %s" % dep.priority)
            msg.append(" root: %s" % dep.root)
            msg.append(" atom: %s" % dep.atom)
            writemsg_level("".join("%s\n" % l for l in msg),
                noiselevel=-1, level=logging.DEBUG)

    # Let rebuild logic consider this (pkg, dep) edge.
    self._rebuild.add(dep_pkg, dep)

    # Ignored deps are only traversed when explicitly requested
    # (used by "complete" graph mode).
    ignore = dep.collapsed_priority.ignored and \
        not self._dynamic_config._traverse_ignored_deps
    if not ignore and not self._add_pkg(dep_pkg, dep):
        # NOTE(review): failure return appears elided.
def _check_slot_conflict(self, pkg, atom):
    """
    Check whether pkg's slot is already occupied by another package in
    the graph.

    @return: (existing_node, matches) where existing_node is the
        package currently mapped to pkg's slot (or None) and matches
        indicates whether atom also matches that existing package.
    """
    existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
    # NOTE(review): upstream initializes 'matches = None' and guards on
    # 'if existing_node:' here; those lines appear elided.
    matches = pkg.cpv == existing_node.cpv
    # NOTE(review): an 'atom is not None' continuation clause appears
    # elided from the following condition.
    if pkg != existing_node and \
        # Use package set for matching since it will match via
        # PROVIDE when necessary, while match_from_list does not.
        matches = bool(InternalPackageSet(initial_atoms=(atom,),
            allow_repo=True).findAtomForPackage(existing_node,
            modified_use=self._pkg_use_enabled(existing_node)))

    return (existing_node, matches)
def _add_pkg(self, pkg, dep):
    """
    Adds a package to the depgraph, queues dependencies, and handles
    slot conflicts.

    NOTE(review): a number of lines of this method appear to be elided
    from this copy (debug 'if' guards, writemsg_level call openers,
    try/except scaffolding, early returns) -- confirm against upstream
    before relying on the exact control flow below.
    """
    debug = "--debug" in self._frozen_config.myopts
    # NOTE(review): upstream normalizes dep/myparent/depth defaults
    # here; those lines appear elided.
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()

    # Debug trace: the child package and the USE state it was
    # evaluated with.
    "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
        pkg_use_display(pkg, self._frozen_config.myopts,
        modified_use=self._pkg_use_enabled(pkg))),
        level=logging.DEBUG, noiselevel=-1)
    if isinstance(myparent,
        (PackageArg, AtomArg)):
        # For PackageArg and AtomArg types, it's
        # redundant to display the atom attribute.
        "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
        level=logging.DEBUG, noiselevel=-1)
    # Display the specific atom from SetArg or
    if dep.atom is not dep.atom.unevaluated_atom:
        uneval = " (%s)" % (dep.atom.unevaluated_atom,)
    "%s%s%s required by %s\n" %
    ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
    level=logging.DEBUG, noiselevel=-1)

    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]

    # NOTE(review): the enclosing 'try:' appears elided before this call.
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString as e:
        if not pkg.installed:
            # should have been masked before it was selected

    # NOTE: REQUIRED_USE checks are delayed until after
    # package selection, since we want to prompt the user
    # for USE adjustment rather than have REQUIRED_USE
    # affect package selection and || dep choices.
    if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
        eapi_has_required_use(pkg.metadata["EAPI"]):
        required_use_is_sat = check_required_use(
            pkg.metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag)
        if not required_use_is_sat:
            # Record all parent edges so the eventual REQUIRED_USE
            # message can show who pulled this package in.
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))

            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)

            # Queue a display entry that triggers the REQUIRED_USE
            # prompt instead of restarting/backtracking.
            atom = Atom("=" + pkg.cpv)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                {"myparent" : dep.parent, "show_req_use" : pkg}))
            self._dynamic_config._skip_restart = True

    if not pkg.onlydeps:
        existing_node, existing_node_matches = \
            self._check_slot_conflict(pkg, dep.atom)
        slot_collision = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._dynamic_config.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self._dynamic_config.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
            # NOTE(review): the return for the reuse case appears elided.

        # A slot conflict has occurred.
        # The existing node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking and \
            self._dynamic_config._runtime_pkg_mask:
            if "--debug" in self._frozen_config.myopts:
                # NOTE(review): writemsg(...) opener appears elided.
                "!!! backtracking loop detected: %s %s\n" % \
                self._dynamic_config._runtime_pkg_mask[
                    existing_node]), noiselevel=-1)
        elif self._dynamic_config._allow_backtracking and \
            not self._accept_blocker_conflicts() and \
            not self.need_restart():
            self._add_slot_conflict(pkg)
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))

            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self._add_parent_atom(pkg, parent_atom)
            self._process_slot_conflicts()

            # The ordering of backtrack_data can make
            # a difference here, because both mask actions may lead
            # to valid, but different, solutions and the one with
            # 'existing_node' masked is usually the better one. Because
            # of that, we choose an order such that
            # the backtracker will first explore the choice with
            # existing_node masked. The backtracker reverses the
            # order, so the order it uses is the reverse of the
            # order shown here. See bug #339606.
            for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
                # For missed update messages, find out which
                # atoms matched to_be_selected that did not
                # match to_be_masked.
                self._dynamic_config._parent_atoms.get(to_be_selected, set())
                conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
                parent_atoms = conflict_atoms

                all_parents.update(parent_atoms)

                # Check whether masking to_be_masked would actually
                # violate any of the collected parent atoms.
                for parent, atom in parent_atoms:
                    i = InternalPackageSet(initial_atoms=(atom,),
                    if not i.findAtomForPackage(to_be_masked):
                        fallback_data.append((to_be_masked, parent_atoms))

                # 'to_be_masked' does not violate any parent atom, which means
                # there is no point in masking it.
                backtrack_data.append((to_be_masked, parent_atoms))

            if not backtrack_data:
                # This shouldn't happen, but fall back to the old
                # behavior if this gets triggered somehow.
                backtrack_data = fallback_data

            if len(backtrack_data) > 1:
                # NOTE: Generally, we prefer to mask the higher
                # version since this solves common cases in which a
                # lower version is needed so that all dependencies
                # will be satisfied (bug #337178). However, if
                # existing_node happens to be installed then we
                # mask that since this is a common case that is
                # triggered when --update is not enabled.
                if existing_node.installed:
                    # NOTE(review): body ('pass'?) appears elided.
                elif pkg > existing_node:
                    backtrack_data.reverse()

            to_be_masked = backtrack_data[-1][0]

            self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
            self._dynamic_config._need_restart = True
            if "--debug" in self._frozen_config.myopts:
                # Debug trace of the chosen backtrack plan.
                msg.append("backtracking due to slot conflict:")
                if backtrack_data is fallback_data:
                    msg.append("!!! backtrack_data fallback")
                msg.append(" first package: %s" % existing_node)
                msg.append(" second package: %s" % pkg)
                msg.append(" package to mask: %s" % to_be_masked)
                msg.append(" slot: %s" % pkg.slot_atom)
                msg.append(" parents: %s" % ", ".join( \
                    "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
                writemsg_level("".join("%s\n" % l for l in msg),
                    noiselevel=-1, level=logging.DEBUG)

        # A slot collision has occurred. Sometimes this coincides
        # with unresolvable blockers, so the slot collision will be
        # shown later if there are no unresolvable blockers.
        self._add_slot_conflict(pkg)
        slot_collision = True

        # Debug trace of the colliding slot occupant.
        "%s%s %s\n" % ("Slot Conflict:".ljust(15),
            existing_node, pkg_use_display(existing_node,
            self._frozen_config.myopts,
            modified_use=self._pkg_use_enabled(existing_node))),
            level=logging.DEBUG, noiselevel=-1)

        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self._dynamic_config.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        elif not previously_added:
            self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
            # Invalidate caches that depend on the set of known packages.
            self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
            self._dynamic_config._highest_pkg_cache.clear()
            self._check_masks(pkg)

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
            except portage.exception.InvalidDependString:
                if not pkg.installed:
                    # should have been masked before it was selected

    self._dynamic_config._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self._dynamic_config.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)

    # This section determines whether we go deeper into dependencies or not.
    # We want to go deeper on a few occasions:
    # Installing package A, we need to make sure package A's deps are met.
    # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    deep = self._dynamic_config.myparams.get("deep", 0)
    recurse = deep is True or depth + 1 <= deep
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
        # NOTE(review): body (early return?) appears elided.
    elif pkg.installed and not recurse:
        dep_stack = self._dynamic_config._ignored_deps

    self._spinner_update()

    if not previously_added:
        dep_stack.append(pkg)
1350 def _check_masks(self, pkg):
1352 slot_key = (pkg.root, pkg.slot_atom)
1354 # Check for upgrades in the same slot that are
1355 # masked due to a LICENSE change in a newer
1356 # version that is not masked for any other reason.
1357 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
1358 if other_pkg is not None and pkg < other_pkg:
1359 self._dynamic_config._masked_license_updates.add(other_pkg)
1361 def _add_parent_atom(self, pkg, parent_atom):
1362 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
1363 if parent_atoms is None:
1364 parent_atoms = set()
1365 self._dynamic_config._parent_atoms[pkg] = parent_atoms
1366 parent_atoms.add(parent_atom)
1368 def _add_slot_conflict(self, pkg):
1369 self._dynamic_config._slot_collision_nodes.add(pkg)
1370 slot_key = (pkg.slot_atom, pkg.root)
1371 slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
1372 if slot_nodes is None:
1374 slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
1375 self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Expand pkg's DEPEND/RDEPEND/PDEPEND strings, evaluate USE
    conditionals, queue disjunctive parts, and feed the result to
    _add_pkg_dep_string for each dep class.

    NOTE(review): try/except scaffolding and some assignments appear
    elided from this copy -- confirm against upstream.
    """
    metadata = pkg.metadata
    removal_action = "remove" in self._dynamic_config.myparams

    # Copy the raw dep strings for each dep class.
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]

    # --buildpkgonly without "deep": runtime deps are irrelevant since
    # nothing gets merged into the live filesystem.
    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            ignore_build_time_deps = True

    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        ignore_build_time_deps = True

    # Build-time deps are resolved against the root the build runs on,
    # unless --root-deps overrides that.
    depend_root = myroot
    depend_root = self._frozen_config._running_root.root
    root_deps = self._frozen_config.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            depend_root = myroot
        elif root_deps == "rdeps":
            ignore_build_time_deps = True

    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if ignore_build_time_deps and \
        not self._rebuild.rebuild:
        edepend["DEPEND"] = ""

    # (root, dep string, priority) triple per dep class.
    (depend_root, edepend["DEPEND"],
        self._priority(buildtime=True,
        optional=(pkg.built or ignore_build_time_deps),
        ignored=ignore_build_time_deps)),
    (myroot, edepend["RDEPEND"],
        self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"],
        self._priority(runtime_post=True))

    debug = "--debug" in self._frozen_config.myopts

    for dep_root, dep_string, dep_priority in deps:
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Depstring: %s\n" % (dep_string,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

        # Evaluate USE conditionals, validating flags against IUSE.
        dep_string = portage.dep.use_reduce(dep_string,
            uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected

            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=self._pkg_use_enabled(pkg))
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)

        # Defer || and virtual deps to the disjunctive stack.
        dep_string = list(self._queue_disjunctive_deps(
            pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            self._dynamic_config._masked_installed.add(pkg)

            # should have been masked before it was selected

        dep_string = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True)

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            # NOTE(review): failure return appears elided.

    self._dynamic_config._traversed_pkg_deps.add(pkg)
# NOTE(review): the signature continuation ('allow_unsatisfied):') and
# the try/finally around the wrapped call appear elided from this copy.
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    # Snapshot autounmask so it can be restored after the wrapped call.
    _autounmask_backup = self._dynamic_config._autounmask
    if dep_priority.optional or dep_priority.ignored:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    return self._wrapped_add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string,
    # Restore the previous autounmask state unconditionally.
    self._dynamic_config._autounmask = _autounmask_backup
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
    dep_string, allow_unsatisfied):
    """
    Select atoms for pkg's dep string via _select_atoms, build
    Dependency instances for each selected (atom, child) pair, and
    queue them. Indirect virtual deps returned by dep_check are added
    with their real parent/child relationships preserved.

    NOTE(review): debug 'if' guards, try/except scaffolding and some
    early returns appear elided from this copy -- confirm against
    upstream before relying on the exact control flow below.
    """
    depth = pkg.depth + 1
    deep = self._dynamic_config.myparams.get("deep", 0)
    recurse_satisfied = deep is True or depth <= deep
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"

    writemsg_level("\nParent: %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Depstring: %s\n" % (dep_string,),
        noiselevel=-1, level=logging.DEBUG)
    writemsg_level("Priority: %s\n" % (dep_priority,),
        noiselevel=-1, level=logging.DEBUG)

    # Resolve the dep string into concrete atoms/choices.
    selected_atoms = self._select_atoms(dep_root,
        dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
        strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString:
        self._dynamic_config._masked_installed.add(pkg)
        # should have been masked before it was selected

    writemsg_level("Candidates: %s\n" % \
        ([str(x) for x in selected_atoms[pkg]],),
        noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    traversed_virt_pkgs = set()

    reinstall_atoms = self._frozen_config.reinstall_atoms
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        # If this was a specially generated virtual atom
        # from dep_check, map it back to the original, in
        # order to avoid distortion in places like display
        # or conflict resolution code.
        is_virt = hasattr(atom, '_orig_atom')
        atom = getattr(atom, '_orig_atom', atom)

        if atom.blocker and \
            (dep_priority.optional or dep_priority.ignored):
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.
            # NOTE(review): 'continue' appears elided.

        mypriority = dep_priority.copy()
        if not atom.blocker:
            # Installed packages matching the atom, newest first,
            # excluding any the user asked to reinstall.
            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    mypriority.satisfied = inst_pkg
            if not mypriority.satisfied:
                # none visible, so use highest
                mypriority.satisfied = inst_pkgs[0]

        dep = Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root)

        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if not atom.blocker and \
            not recurse_satisfied and \
            mypriority.satisfied and \
            mypriority.satisfied.visible and \
            dep.child is not None and \
            not dep.child.installed and \
            self._dynamic_config._slot_pkg_map[dep.child.root].get(
            dep.child.slot_atom) is None:
            if dep.root == self._frozen_config.target_root:
                # NOTE(review): 'try:' appears elided here.
                myarg = next(self._iter_atoms_for_pkg(dep.child))
                except StopIteration:
                except InvalidDependString:
                    if not dep.child.installed:
                        # This shouldn't happen since the package
                        # should have been masked.

            # Existing child selection may not be valid unless
            # it's added to the graph immediately, since "complete"
            # mode may select a different child later.
            self._dynamic_config._ignored_deps.append(dep)

        if dep_priority.ignored and \
            not self._dynamic_config._traverse_ignored_deps:
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)
            self._dynamic_config._ignored_deps.append(dep)

        if not self._add_dep(dep,
            allow_unsatisfied=allow_unsatisfied):
            # NOTE(review): failure return appears elided.
        if is_virt and dep.child is not None:
            traversed_virt_pkgs.add(dep.child)

    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_dep, atoms in selected_atoms.items():
        virt_pkg = virt_dep.child
        if virt_pkg not in traversed_virt_pkgs:
            # NOTE(review): 'continue' appears elided.

        writemsg_level("\nCandidates: %s: %s\n" % \
            (virt_pkg.cpv, [str(x) for x in atoms]),
            noiselevel=-1, level=logging.DEBUG)

        if not dep_priority.ignored or \
            self._dynamic_config._traverse_ignored_deps:

            # Same installed-satisfaction logic as above, but keyed on
            # the virtual's own atom.
            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(virt_dep.atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            for inst_pkg in inst_pkgs:
                if self._pkg_visibility_check(inst_pkg):
                    virt_dep.priority.satisfied = inst_pkg
            if not virt_dep.priority.satisfied:
                # none visible, so use highest
                virt_dep.priority.satisfied = inst_pkgs[0]

            if not self._add_pkg(virt_pkg, virt_dep):
                # NOTE(review): failure return appears elided.

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):

            # If this was a specially generated virtual atom
            # from dep_check, map it back to the original, in
            # order to avoid distortion in places like display
            # or conflict resolution code.
            is_virt = hasattr(atom, '_orig_atom')
            atom = getattr(atom, '_orig_atom', atom)

            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = [inst_pkg for inst_pkg in
                    reversed(vardb.match_pkgs(atom))
                    if not reinstall_atoms.findAtomForPackage(inst_pkg,
                    modified_use=self._pkg_use_enabled(inst_pkg))]
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        mypriority.satisfied = inst_pkg
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

            # Dependencies of virtuals are considered to have the
            # same depth as the virtual itself.
            dep = Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_dep.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root,
                collapsed_parent=pkg, collapsed_priority=dep_priority)

            if not atom.blocker and \
                not recurse_satisfied and \
                mypriority.satisfied and \
                mypriority.satisfied.visible and \
                dep.child is not None and \
                not dep.child.installed and \
                self._dynamic_config._slot_pkg_map[dep.child.root].get(
                dep.child.slot_atom) is None:
                if dep.root == self._frozen_config.target_root:
                    # NOTE(review): 'try:' appears elided here.
                    myarg = next(self._iter_atoms_for_pkg(dep.child))
                    except StopIteration:
                    except InvalidDependString:
                        if not dep.child.installed:

                self._dynamic_config._ignored_deps.append(dep)

            if dep_priority.ignored and \
                not self._dynamic_config._traverse_ignored_deps:
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)
                self._dynamic_config._ignored_deps.append(dep)

            if not self._add_dep(dep,
                allow_unsatisfied=allow_unsatisfied):
                # NOTE(review): failure return appears elided.
            if is_virt and dep.child is not None:
                traversed_virt_pkgs.add(dep.child)

    writemsg_level("\nExiting... %s\n" % (pkg,),
        noiselevel=-1, level=logging.DEBUG)
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    (NOTE(review): remainder of this docstring appears elided.)
    """
    # Map each atom to its selected package.
    dep_pkg, existing_node = self._select_package(
        root_config.root, atom)
    atom_pkg_map[atom] = dep_pkg

    # Trivial case: nothing to minimize with fewer than two selections.
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():

    # Invert the mapping and group packages by cp.
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for pkgs in cp_pkg_map.values():
        for atom in pkg_atom_map[pkg]:

    # Use a digraph to identify and eliminate any
    # redundant package selections.
    atom_pkg_graph = digraph()
    for atom in pkg_atom_map[pkg1]:
        atom_pkg_graph.add(pkg1, atom)
        atom_set = InternalPackageSet(initial_atoms=(atom,),
        if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
            atom_pkg_graph.add(pkg2, atom)

    # A package is redundant if every atom it satisfies is also
    # satisfied by at least one other selected package.
    eliminate_pkg = True
    for atom in atom_pkg_graph.parent_nodes(pkg):
        if len(atom_pkg_graph.child_nodes(atom)) < 2:
            eliminate_pkg = False
    atom_pkg_graph.remove(pkg)

    # Yield ~, =*, < and <= atoms first, since those are more likely to
    # cause slot conflicts, and we want those atoms to be displayed
    # in the resulting slot conflict message (see bug #291142).
    for atom in cp_atoms:
        for child_pkg in atom_pkg_graph.child_nodes(atom):
            existing_node, matches = \
                self._check_slot_conflict(child_pkg, atom)
            if existing_node and not matches:
                conflict_atoms.append(atom)
        normal_atoms.append(atom)

    for atom in chain(conflict_atoms, normal_atoms):
        child_pkgs = atom_pkg_graph.child_nodes(atom)
        # if more than one child, yield highest version
        if len(child_pkgs) > 1:
            yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    (NOTE(review): remainder of this docstring appears elided.)
    """
    # NOTE(review): loop index/element setup ('i = 0', 'x = ...')
    # appears elided from this copy.
    while i < len(dep_struct):
        if isinstance(x, list):
            # Recurse into nested dep groups.
            for y in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, x):
            # '||' operator: queue it together with its choice list.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ x, dep_struct[ i + 1 ] ] )
            x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
        except portage.exception.InvalidAtom:
            if not pkg.installed:
                raise portage.exception.InvalidDependString(
                    "invalid atom: '%s'" % x)

        # Note: Eventually this will check for PROPERTIES=virtual
        # or whatever other metadata gets implemented for this
        if x.cp.startswith('virtual/'):
            # Old-style virtuals are treated as one-choice disjunctions.
            self._queue_disjunction( pkg, dep_root,
                dep_priority, [ str(x) ] )
1879 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1880 self._dynamic_config._dep_disjunctive_stack.append(
1881 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
    populate self._dynamic_config._dep_stack.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dynamic_config._dep_disjunctive_stack.pop()
    # Re-serialize the structure so _add_pkg_dep_string can re-parse it
    # with unevaluated atoms preserved.
    dep_string = portage.dep.paren_enclose(dep_struct,
        unevaluated_atom=True)
    if not self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
        # NOTE(review): failure/success returns appear elided.
def _priority(self, **kwargs):
    """
    Construct a dependency priority appropriate for the current action.

    Removal actions ("remove" in myparams) use UnmergeDepPriority;
    everything else uses DepPriority. kwargs are forwarded to the
    chosen constructor.
    """
    if "remove" in self._dynamic_config.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Without this else, DepPriority unconditionally overwrote the
        # removal-case assignment, making UnmergeDepPriority dead code.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @return: a list of atoms containing categories (possibly empty)
    """
    # Parse the atom once with a dummy "null" category so the package
    # name part can be extracted.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    # Collect every category that contains a package named atom_pn.
    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
                # NOTE(review): category accumulation appears elided.

    # Re-insert each found category into the original atom.
    for cat in categories:
        deps.append(Atom(insert_category_into_atom(
            atom_without_category, cat), allow_repo=True))
    # NOTE(review): 'return deps' appears elided.
1930 def _have_new_virt(self, root, atom_cp):
1932 for db, pkg_type, built, installed, db_keys in \
1933 self._dynamic_config._filtered_trees[root]["dbs"]:
1934 if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """
    Iterate over the command-line argument atoms registered for pkg's
    root that match pkg, skipping atoms superseded by a new-style
    virtual or by a visible higher-slot version.

    NOTE(review): several continue/break lines appear elided from this
    copy -- confirm control flow against upstream.
    """
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        # Skip when the atom's cp now resolves to a new-style virtual
        # rather than pkg's own cp.
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
        self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        # Look for a visible matching package in a higher slot.
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            # PackageArg instances bind to one exact package; skip
            # args bound to a different package than pkg.
            if isinstance(arg, PackageArg) and \
1968 def select_files(self, myfiles):
1969 """Given a list of .tbz2s, .ebuilds sets, and deps, populate
1970 self._dynamic_config._initial_arg_list and call self._resolve to create the
1971 appropriate depgraph and return a favorite list."""
# NOTE(review): sampled extraction — the per-file dispatch loop header,
# several `continue`/`return` lines and some initializers (args,
# lookup_owners, relative_paths, owners, greedy_args) are not visible.
1973 debug = "--debug" in self._frozen_config.myopts
1974 root_config = self._frozen_config.roots[self._frozen_config.target_root]
1975 sets = root_config.sets
1976 depgraph_sets = self._dynamic_config.sets[root_config.root]
1978 eroot = root_config.root
1979 root = root_config.settings['ROOT']
# Handles to the various package databases for the target root.
1980 vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
1981 real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
1982 portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
1983 bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
1984 pkgsettings = self._frozen_config.pkgsettings[eroot]
1986 onlydeps = "--onlydeps" in self._frozen_config.myopts
# --- Branch: binary package file (.tbz2), resolved relative to PKGDIR ---
1989 ext = os.path.splitext(x)[1]
1991 if not os.path.exists(x):
1993 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1994 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1995 elif os.path.exists(
1996 os.path.join(pkgsettings["PKGDIR"], x)):
1997 x = os.path.join(pkgsettings["PKGDIR"], x)
1999 writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
2000 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
2001 return 0, myfavorites
# Derive the cpv key from the tbz2's embedded CATEGORY plus its filename.
2002 mytbz2=portage.xpak.tbz2(x)
2003 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The file must be the exact one the bintree would use for this key.
2004 if os.path.realpath(x) != \
2005 os.path.realpath(bindb.bintree.getname(mykey)):
2006 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
2007 self._dynamic_config._skip_restart = True
2008 return 0, myfavorites
2010 pkg = self._pkg(mykey, "binary", root_config,
2012 args.append(PackageArg(arg=x, package=pkg,
2013 root_config=root_config))
# --- Branch: raw .ebuild path inside a portage tree hierarchy ---
2014 elif ext==".ebuild":
2015 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2016 pkgdir = os.path.dirname(ebuild_path)
2017 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2018 cp = pkgdir[len(tree_root)+1:]
2019 e = portage.exception.PackageNotFound(
2020 ("%s is not in a valid portage tree " + \
2021 "hierarchy or does not exist") % x)
2022 if not portage.isvalidatom(cp):
2024 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to get the cpv basename.
2025 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2026 if not portage.isvalidatom("="+mykey):
2028 ebuild_path = portdb.findname(mykey)
# The given path must match the one portdb would resolve for this cpv.
2030 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2031 cp, os.path.basename(ebuild_path)):
2032 writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
2033 self._dynamic_config._skip_restart = True
2034 return 0, myfavorites
# Warn (with countdown) when directly emerging a masked ebuild.
2035 if mykey not in portdb.xmatch(
2036 "match-visible", portage.cpv_getkey(mykey)):
2037 writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
2038 writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
2039 writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
2040 countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
2043 raise portage.exception.PackageNotFound(
2044 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2045 pkg = self._pkg(mykey, "ebuild", root_config,
2046 onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
2047 os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
2048 args.append(PackageArg(arg=x, package=pkg,
2049 root_config=root_config))
# --- Branch: absolute filesystem path — resolve owner package later ---
2050 elif x.startswith(os.path.sep):
2051 if not x.startswith(eroot):
2052 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2053 " $EROOT.\n") % x, noiselevel=-1)
2054 self._dynamic_config._skip_restart = True
2056 # Queue these up since it's most efficient to handle
2057 # multiple files in a single iter_owners() call.
2058 lookup_owners.append(x)
# --- Branch: relative path ("./" or "../") — normalize then queue ---
2059 elif x.startswith("." + os.sep) or \
2060 x.startswith(".." + os.sep):
2061 f = os.path.abspath(x)
2062 if not f.startswith(eroot):
2063 portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
2064 " $EROOT.\n") % (f, x), noiselevel=-1)
2065 self._dynamic_config._skip_restart = True
2067 lookup_owners.append(f)
# --- Branch: named package sets ("system"/"world" or @set syntax) ---
2069 if x in ("system", "world"):
2071 if x.startswith(SETPREFIX):
2072 s = x[len(SETPREFIX):]
2074 raise portage.exception.PackageSetNotFound(s)
2075 if s in depgraph_sets.sets:
2078 depgraph_sets.sets[s] = pset
2079 args.append(SetArg(arg=x, pset=pset,
2080 root_config=root_config))
# --- Branch: plain package atom (possibly category-less) ---
2082 if not is_valid_package_atom(x, allow_repo=True):
2083 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2085 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2086 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2087 self._dynamic_config._skip_restart = True
2089 # Don't expand categories or old-style virtuals here unless
2090 # necessary. Expansion of old-style virtuals here causes at
2091 # least the following problems:
2092 # 1) It's more difficult to determine which set(s) an atom
2093 # came from, if any.
2094 # 2) It takes away freedom from the resolver to choose other
2095 # possible expansions when necessary.
2097 args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
2098 root_config=root_config))
# Category-less atom: expand, then disambiguate using installed
# packages and virtual/ preferences before giving up as ambiguous.
2100 expanded_atoms = self._dep_expand(root_config, x)
2101 installed_cp_set = set()
2102 for atom in expanded_atoms:
2103 if vardb.cp_list(atom.cp):
2104 installed_cp_set.add(atom.cp)
2106 if len(installed_cp_set) > 1:
2107 non_virtual_cps = set()
2108 for atom_cp in installed_cp_set:
2109 if not atom_cp.startswith("virtual/"):
2110 non_virtual_cps.add(atom_cp)
# Prefer the single non-virtual cp when exactly one exists.
2111 if len(non_virtual_cps) == 1:
2112 installed_cp_set = non_virtual_cps
2114 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2115 installed_cp = next(iter(installed_cp_set))
2116 for atom in expanded_atoms:
2117 if atom.cp == installed_cp:
2119 for pkg in self._iter_match_pkgs_any(
2120 root_config, atom.without_use,
2122 if not pkg.installed:
2126 expanded_atoms = [atom]
2129 # If a non-virtual package and one or more virtual packages
2130 # are in expanded_atoms, use the non-virtual package.
2131 if len(expanded_atoms) > 1:
2132 number_of_virtuals = 0
2133 for expanded_atom in expanded_atoms:
2134 if expanded_atom.cp.startswith("virtual/"):
2135 number_of_virtuals += 1
2137 candidate = expanded_atom
2138 if len(expanded_atoms) - number_of_virtuals == 1:
2139 expanded_atoms = [ candidate ]
# Still ambiguous: report and abort this resolution attempt.
2141 if len(expanded_atoms) > 1:
2142 writemsg("\n\n", noiselevel=-1)
2143 ambiguous_package_name(x, expanded_atoms, root_config,
2144 self._frozen_config.spinner, self._frozen_config.myopts)
2145 self._dynamic_config._skip_restart = True
2146 return False, myfavorites
2148 atom = expanded_atoms[0]
# No expansion matched: fall back to PROVIDE-based (old-style)
# virtuals, letting the depgraph choose which provider.
2150 null_atom = Atom(insert_category_into_atom(x, "null"),
2152 cat, atom_pn = portage.catsplit(null_atom.cp)
2153 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2155 # Allow the depgraph to choose which virtual.
2156 atom = Atom(null_atom.replace('null/', 'virtual/', 1),
# USE conditionals are not allowed in command-line atoms.
2161 if atom.use and atom.use.conditional:
2163 ("\n\n!!! '%s' contains a conditional " + \
2164 "which is not allowed.\n") % (x,), noiselevel=-1)
2165 writemsg("!!! Please check ebuild(5) for full details.\n")
2166 self._dynamic_config._skip_restart = True
2169 args.append(AtomArg(arg=x, atom=atom,
2170 root_config=root_config))
# --- Resolve queued filesystem paths to their owning packages ---
2174 search_for_multiple = False
2175 if len(lookup_owners) > 1:
2176 search_for_multiple = True
2178 for x in lookup_owners:
2179 if not search_for_multiple and os.path.isdir(x):
2180 search_for_multiple = True
# Strip ROOT prefix so paths are relative, as iter_owners expects.
2181 relative_paths.append(x[len(root)-1:])
2184 for pkg, relative_path in \
2185 real_vardb._owners.iter_owners(relative_paths):
2186 owners.add(pkg.mycpv)
2187 if not search_for_multiple:
2191 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2192 "by any package.\n") % lookup_owners[0], noiselevel=-1)
2193 self._dynamic_config._skip_restart = True
# Turn each owning cpv into a slot-qualified atom when SLOT is known.
2197 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2199 # portage now masks packages with missing slot, but it's
2200 # possible that one was installed by an older version
2201 atom = Atom(portage.cpv_getkey(cpv))
2203 atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
2204 args.append(AtomArg(arg=atom, atom=atom,
2205 root_config=root_config))
2207 if "--update" in self._frozen_config.myopts:
2208 # In some cases, the greedy slots behavior can pull in a slot that
2209 # the user would want to uninstall due to it being blocked by a
2210 # newer version in a different slot. Therefore, it's necessary to
2211 # detect and discard any that should be uninstalled. Each time
2212 # that arguments are updated, package selections are repeated in
2213 # order to ensure consistency with the current arguments:
2215 # 1) Initialize args
2216 # 2) Select packages and generate initial greedy atoms
2217 # 3) Update args with greedy atoms
2218 # 4) Select packages and generate greedy atoms again, while
2219 # accounting for any blockers between selected packages
2220 # 5) Update args with revised greedy atoms
2222 self._set_args(args)
2225 greedy_args.append(arg)
2226 if not isinstance(arg, AtomArg):
2228 for atom in self._greedy_slots(arg.root_config, arg.atom):
2230 AtomArg(arg=arg.arg, atom=atom,
2231 root_config=arg.root_config))
2233 self._set_args(greedy_args)
2236 # Revise greedy atoms, accounting for any blockers
2237 # between selected packages.
2238 revised_greedy_args = []
2240 revised_greedy_args.append(arg)
2241 if not isinstance(arg, AtomArg):
2243 for atom in self._greedy_slots(arg.root_config, arg.atom,
2244 blocker_lookahead=True):
2245 revised_greedy_args.append(
2246 AtomArg(arg=arg.arg, atom=atom,
2247 root_config=arg.root_config))
2248 args = revised_greedy_args
2249 del revised_greedy_args
2251 self._set_args(args)
# Collect favorites: atoms for atom/package args, set names for sets.
2253 myfavorites = set(myfavorites)
2255 if isinstance(arg, (AtomArg, PackageArg)):
2256 myfavorites.add(arg.atom)
2257 elif isinstance(arg, SetArg):
2258 myfavorites.add(arg.arg)
2259 myfavorites = list(myfavorites)
2262 portage.writemsg("\n", noiselevel=-1)
2263 # Order needs to be preserved since a feature of --nodeps
2264 # is to allow the user to force a specific merge order.
2265 self._dynamic_config._initial_arg_list = args[:]
2267 return self._resolve(myfavorites)
2269 def _resolve(self, myfavorites):
2270 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
2271 call self._creategraph to process theier deps and return
# NOTE(review): sampled extraction — the docstring terminator, the
# enclosing try/except frame and several continue/return lines are not
# visible here.  Code left byte-identical; comments only.
2273 debug = "--debug" in self._frozen_config.myopts
2274 onlydeps = "--onlydeps" in self._frozen_config.myopts
2275 myroot = self._frozen_config.target_root
2276 pkgsettings = self._frozen_config.pkgsettings[myroot]
2277 pprovideddict = pkgsettings.pprovideddict
2278 virtuals = pkgsettings.getvirtuals()
2279 args = self._dynamic_config._initial_arg_list[:]
# Append rebuild/reinstall atoms scheduled by the rebuild logic.
2280 for root, atom in chain(self._rebuild.rebuild_list,
2281 self._rebuild.reinstall_list):
2282 args.append(AtomArg(arg=atom, atom=atom,
2283 root_config=self._frozen_config.roots[root]))
# Seed the graph with one Dependency per argument atom.
2284 for arg in self._expand_set_args(args, add_to_digraph=True):
2285 for atom in arg.pset.getAtoms():
2286 self._spinner_update()
2287 dep = Dependency(atom=atom, onlydeps=onlydeps,
2288 root=myroot, parent=arg)
# package.provided entries satisfy the atom without a graph node.
2290 pprovided = pprovideddict.get(atom.cp)
2291 if pprovided and portage.match_from_list(atom, pprovided):
2292 # A provided package has been specified on the command line.
2293 self._dynamic_config._pprovided_args.append((arg, atom))
2295 if isinstance(arg, PackageArg):
2296 if not self._add_pkg(arg.package, dep) or \
2297 not self._create_graph():
2298 if not self.need_restart():
2299 sys.stderr.write(("\n\n!!! Problem " + \
2300 "resolving dependencies for %s\n") % \
2302 return 0, myfavorites
2305 writemsg_level("\n Arg: %s\n Atom: %s\n" %
2306 (arg, atom), noiselevel=-1, level=logging.DEBUG)
2307 pkg, existing_node = self._select_package(
2308 myroot, atom, onlydeps=onlydeps)
# No package selected: try old-style virtual expansions against
# package.provided before reporting the dep as unsatisfied.
2310 pprovided_match = False
2311 for virt_choice in virtuals.get(atom.cp, []):
2312 expanded_atom = portage.dep.Atom(
2313 atom.replace(atom.cp, virt_choice.cp, 1))
2314 pprovided = pprovideddict.get(expanded_atom.cp)
2316 portage.match_from_list(expanded_atom, pprovided):
2317 # A provided package has been
2318 # specified on the command line.
2319 self._dynamic_config._pprovided_args.append((arg, atom))
2320 pprovided_match = True
# Only hard-fail for atoms not from the selected/system/world sets.
2325 if not (isinstance(arg, SetArg) and \
2326 arg.name in ("selected", "system", "world")):
2327 self._dynamic_config._unsatisfied_deps_for_display.append(
2328 ((myroot, atom), {"myparent" : arg}))
2329 return 0, myfavorites
2331 self._dynamic_config._missing_args.append((arg, atom))
2333 if atom.cp != pkg.cp:
2334 # For old-style virtuals, we need to repeat the
2335 # package.provided check against the selected package.
2336 expanded_atom = atom.replace(atom.cp, pkg.cp)
2337 pprovided = pprovideddict.get(pkg.cp)
2339 portage.match_from_list(expanded_atom, pprovided):
2340 # A provided package has been
2341 # specified on the command line.
2342 self._dynamic_config._pprovided_args.append((arg, atom))
2344 if pkg.installed and \
2345 "selective" not in self._dynamic_config.myparams and \
2346 not self._frozen_config.excluded_pkgs.findAtomForPackage(
2347 pkg, modified_use=self._pkg_use_enabled(pkg)):
2348 self._dynamic_config._unsatisfied_deps_for_display.append(
2349 ((myroot, atom), {"myparent" : arg}))
2350 # Previous behavior was to bail out in this case, but
2351 # since the dep is satisfied by the installed package,
2352 # it's more friendly to continue building the graph
2353 # and just show a warning message. Therefore, only bail
2354 # out here if the atom is not from either the system or
2356 if not (isinstance(arg, SetArg) and \
2357 arg.name in ("selected", "system", "world")):
2358 return 0, myfavorites
2360 # Add the selected package to the graph as soon as possible
2361 # so that later dep_check() calls can use it as feedback
2362 # for making more consistent atom selections.
2363 if not self._add_pkg(pkg, dep):
2364 if self.need_restart():
2366 elif isinstance(arg, SetArg):
2367 writemsg(("\n\n!!! Problem resolving " + \
2368 "dependencies for %s from %s\n") % \
2369 (atom, arg.arg), noiselevel=-1)
2371 writemsg(("\n\n!!! Problem resolving " + \
2372 "dependencies for %s\n") % \
2373 (atom,), noiselevel=-1)
2374 return 0, myfavorites
2376 except SystemExit as e:
2377 raise # Needed else can't exit
2378 except Exception as e:
2379 writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
2380 writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
2383 # Now that the root packages have been added to the graph,
2384 # process the dependencies.
2385 if not self._create_graph():
2386 return 0, myfavorites
2390 except self._unknown_internal_error:
2391 return False, myfavorites
# Autounmask: if any package in the graph required keyword/mask/USE/
# license changes, report success-without-autounmask and fail the run
# so the needed configuration changes are shown to the user.
2393 digraph_nodes = self._dynamic_config.digraph.nodes
2395 if any(x in digraph_nodes for x in
2396 self._dynamic_config._needed_unstable_keywords) or \
2397 any(x in digraph_nodes for x in
2398 self._dynamic_config._needed_p_mask_changes) or \
2399 any(x in digraph_nodes for x in
2400 self._dynamic_config._needed_use_config_changes) or \
2401 any(x in digraph_nodes for x in
2402 self._dynamic_config._needed_license_changes) :
2403 #We failed if the user needs to change the configuration
2404 self._dynamic_config._success_without_autounmask = True
2405 return False, myfavorites
# Schedule a backtracking restart when rebuilds were triggered.
2407 if self._rebuild.trigger_rebuilds():
2408 backtrack_infos = self._dynamic_config._backtrack_infos
2409 config = backtrack_infos.setdefault("config", {})
2410 config["rebuild_list"] = self._rebuild.rebuild_list
2411 config["reinstall_list"] = self._rebuild.reinstall_list
2412 self._dynamic_config._need_restart = True
2413 return False, myfavorites
2415 # We're true here unless we are missing binaries.
2416 return (True, myfavorites)
2418 def _set_args(self, args):
2420 Create the "__non_set_args__" package set from atoms and packages given as
2421 arguments. This method can be called multiple times if necessary.
2422 The package selection cache is automatically invalidated, since
2423 arguments influence package selections.
# NOTE(review): sampled extraction — docstring quotes and the
# set_atoms/non_set_atoms dict initializers are not visible here.
# Reset per-root bookkeeping before repopulating from `args`.
2428 for root in self._dynamic_config.sets:
2429 depgraph_sets = self._dynamic_config.sets[root]
2430 depgraph_sets.sets.setdefault('__non_set_args__',
2431 InternalPackageSet(allow_repo=True)).clear()
2432 depgraph_sets.atoms.clear()
2433 depgraph_sets.atom_arg_map.clear()
2434 set_atoms[root] = []
2435 non_set_atoms[root] = []
2437 # We don't add set args to the digraph here since that
2438 # happens at a later stage and we don't want to make
2439 # any state changes here that aren't reversed by a
2440 # another call to this method.
2441 for arg in self._expand_set_args(args, add_to_digraph=False):
2442 atom_arg_map = self._dynamic_config.sets[
2443 arg.root_config.root].atom_arg_map
# Route atoms into the set or non-set bucket for their root.
2444 if isinstance(arg, SetArg):
2445 atom_group = set_atoms[arg.root_config.root]
2447 atom_group = non_set_atoms[arg.root_config.root]
2449 for atom in arg.pset.getAtoms():
2450 atom_group.append(atom)
# atom_arg_map records which args referenced each (atom, root).
2451 atom_key = (atom, arg.root_config.root)
2452 refs = atom_arg_map.get(atom_key)
2455 atom_arg_map[atom_key] = refs
# Publish the collected atoms into each root's depgraph sets.
2459 for root in self._dynamic_config.sets:
2460 depgraph_sets = self._dynamic_config.sets[root]
2461 depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
2462 non_set_atoms.get(root, [])))
2463 depgraph_sets.sets['__non_set_args__'].update(
2464 non_set_atoms.get(root, []))
2466 # Invalidate the package selection cache, since
2467 # arguments influence package selections.
2468 self._dynamic_config._highest_pkg_cache.clear()
2469 for trees in self._dynamic_config._filtered_trees.values():
2470 trees["porttree"].dbapi._clear_cache()
2472 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
2474 Return a list of slot atoms corresponding to installed slots that
2475 differ from the slot of the highest visible match. When
2476 blocker_lookahead is True, slot atoms that would trigger a blocker
2477 conflict are automatically discarded, potentially allowing automatic
2478 uninstallation of older slots when appropriate.
# NOTE(review): sampled extraction — docstring quotes, the `slots` /
# `greedy_pkgs` / `blockers` initializers and some early returns are not
# visible here.
2480 highest_pkg, in_graph = self._select_package(root_config.root, atom)
2481 if highest_pkg is None:
2483 vardb = root_config.trees["vartree"].dbapi
# Collect the installed SLOTs for this cp (new-style virtuals only).
2485 for cpv in vardb.match(atom):
2486 # don't mix new virtuals with old virtuals
2487 if portage.cpv_getkey(cpv) == highest_pkg.cp:
2488 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2490 slots.add(highest_pkg.metadata["SLOT"])
# The highest pkg's own slot is never "greedy" — drop it.
2494 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select a lower-versioned visible package.
2497 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
2498 pkg, in_graph = self._select_package(root_config.root, slot_atom)
2499 if pkg is not None and \
2500 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
2501 greedy_pkgs.append(pkg)
2504 if not blocker_lookahead:
2505 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: compute each candidate's blocker atoms from its
# dep strings and discard candidates that conflict.
2508 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
2509 for pkg in greedy_pkgs + [highest_pkg]:
2510 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
2512 selected_atoms = self._select_atoms(
2513 pkg.root, dep_str, self._pkg_use_enabled(pkg),
2514 parent=pkg, strict=True)
2515 except portage.exception.InvalidDependString:
2518 for atoms in selected_atoms.values():
2519 blocker_atoms.extend(x for x in atoms if x.blocker)
2520 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
2522 if highest_pkg not in blockers:
2525 # filter packages with invalid deps
2526 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
2528 # filter packages that conflict with highest_pkg
2529 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
2530 (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
2531 blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
2536 # If two packages conflict, discard the lower version.
2537 discard_pkgs = set()
2538 greedy_pkgs.sort(reverse=True)
# Pairwise scan in descending order; keep the higher of each pair.
2539 for i in range(len(greedy_pkgs) - 1):
2540 pkg1 = greedy_pkgs[i]
2541 if pkg1 in discard_pkgs:
2543 for j in range(i + 1, len(greedy_pkgs)):
2544 pkg2 = greedy_pkgs[j]
2545 if pkg2 in discard_pkgs:
2547 if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
2548 blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
2550 discard_pkgs.add(pkg2)
2552 return [pkg.slot_atom for pkg in greedy_pkgs \
2553 if pkg not in discard_pkgs]
2555 def _select_atoms_from_graph(self, *pargs, **kwargs):
2557 Prefer atoms matching packages that have already been
2558 added to the graph or those that are installed and have
2559 not been scheduled for replacement.
# Thin wrapper: force the graph-backed trees, then delegate to the
# general selector.  (Docstring quotes are missing from this extraction.)
2561 kwargs["trees"] = self._dynamic_config._graph_trees
2562 return self._select_atoms_highest_available(*pargs, **kwargs)
2564 def _select_atoms_highest_available(self, root, depstring,
2565 myuse=None, parent=None, strict=True, trees=None, priority=None):
2566 """This will raise InvalidDependString if necessary. If trees is
2567 None then self._dynamic_config._filtered_trees is used."""
# NOTE(review): sampled extraction — the try/finally frame around
# dep_check, the success check on mycheck[0], and parts of the virtual
# traversal are not visible here.
2569 pkgsettings = self._frozen_config.pkgsettings[root]
2571 trees = self._dynamic_config._filtered_trees
2572 mytrees = trees[root]
2573 atom_graph = digraph()
2575 # Temporarily disable autounmask so that || preferences
2576 # account for masking and USE settings.
2577 _autounmask_backup = self._dynamic_config._autounmask
2578 self._dynamic_config._autounmask = False
2579 # backup state for restoration, in case of recursive
2580 # calls to this method
2581 backup_state = mytrees.copy()
2583 # clear state from previous call, in case this
2584 # call is recursive (we have a backup, that we
2585 # will use to restore it later)
2586 mytrees.pop("pkg_use_enabled", None)
2587 mytrees.pop("parent", None)
2588 mytrees.pop("atom_graph", None)
2589 mytrees.pop("priority", None)
# Thread per-call context to dep_check via the trees mapping.
2591 mytrees["pkg_use_enabled"] = self._pkg_use_enabled
2592 if parent is not None:
2593 mytrees["parent"] = parent
2594 mytrees["atom_graph"] = atom_graph
2595 if priority is not None:
2596 mytrees["priority"] = priority
2598 mycheck = portage.dep_check(depstring, None,
2599 pkgsettings, myuse=myuse,
2600 myroot=root, trees=trees)
# Restore autounmask and the pre-call trees state (cleanup path).
2603 self._dynamic_config._autounmask = _autounmask_backup
2604 mytrees.pop("pkg_use_enabled", None)
2605 mytrees.pop("parent", None)
2606 mytrees.pop("atom_graph", None)
2607 mytrees.pop("priority", None)
2608 mytrees.update(backup_state)
2610 raise portage.exception.InvalidDependString(mycheck[1])
2612 selected_atoms = mycheck[1]
2613 elif parent not in atom_graph:
2614 selected_atoms = {parent : mycheck[1]}
2616 # Recursively traversed virtual dependencies, and their
2617 # direct dependencies, are considered to have the same
2618 # depth as direct dependencies.
2619 if parent.depth is None:
2622 virt_depth = parent.depth + 1
2623 chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
2624 selected_atoms = OrderedDict()
# Depth-first walk of the atom graph built by dep_check, grouping the
# chosen atoms under a Dependency key per traversed node.
2625 node_stack = [(parent, None, None)]
2626 traversed_nodes = set()
2628 node, node_parent, parent_atom = node_stack.pop()
2629 traversed_nodes.add(node)
2633 if node_parent is parent:
2634 if priority is None:
2635 node_priority = None
2637 node_priority = priority.copy()
2639 # virtuals only have runtime deps
2640 node_priority = self._priority(runtime=True)
2642 k = Dependency(atom=parent_atom,
2643 blocker=parent_atom.blocker, child=node,
2644 depth=virt_depth, parent=node_parent,
2645 priority=node_priority, root=node.root)
2648 selected_atoms[k] = child_atoms
2649 for atom_node in atom_graph.child_nodes(node):
2650 child_atom = atom_node[0]
# Only atoms actually chosen by dep_check are traversed.
2651 if id(child_atom) not in chosen_atom_ids:
2653 child_atoms.append(child_atom)
2654 for child_node in atom_graph.child_nodes(atom_node):
2655 if child_node in traversed_nodes:
2657 if not portage.match_from_list(
2658 child_atom, [child_node]):
2659 # Typically this means that the atom
2660 # specifies USE deps that are unsatisfied
2661 # by the selected package. The caller will
2662 # record this as an unsatisfied dependency
2665 node_stack.append((child_node, node, child_atom))
2667 return selected_atoms
2669 def _expand_virt_from_graph(self, root, atom):
# Expand a virtual atom into the real atoms its graph-selected provider
# pulls in via RDEPEND.  Non-Atom and non-virtual inputs are passed
# through (the yield statements are missing from this extraction).
2670 if not isinstance(atom, Atom):
2672 graphdb = self._dynamic_config.mydbapi[root]
2673 match = graphdb.match_pkgs(atom)
2678 if not pkg.cpv.startswith("virtual/"):
# Resolve the virtual's RDEPEND against packages already in the graph.
2682 rdepend = self._select_atoms_from_graph(
2683 pkg.root, pkg.metadata.get("RDEPEND", ""),
2684 myuse=self._pkg_use_enabled(pkg),
2685 parent=pkg, strict=False)
2686 except InvalidDependString as e:
2687 writemsg_level("!!! Invalid RDEPEND in " + \
2688 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2689 (pkg.root, pkg.cpv, e),
2690 noiselevel=-1, level=logging.ERROR)
2694 for atoms in rdepend.values():
2696 if hasattr(atom, "_orig_atom"):
2697 # Ignore virtual atoms since we're only
2698 # interested in expanding the real atoms.
2702 def _virt_deps_visible(self, pkg, ignore_use=False):
2704 Assumes pkg is a virtual package. Traverses virtual deps recursively
2705 and returns True if all deps are visible, False otherwise. This is
2706 useful for checking if it will be necessary to expand virtual slots,
2707 for cases like bug #382557.
# NOTE(review): sampled extraction — docstring quotes, the return
# statements and parts of the recursion are not visible here.
2710 rdepend = self._select_atoms(
2711 pkg.root, pkg.metadata.get("RDEPEND", ""),
2712 myuse=self._pkg_use_enabled(pkg),
2713 parent=pkg, priority=self._priority(runtime=True))
2714 except InvalidDependString as e:
2715 if not pkg.installed:
2717 writemsg_level("!!! Invalid RDEPEND in " + \
2718 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
2719 (pkg.root, pkg.cpv, e),
2720 noiselevel=-1, level=logging.ERROR)
# Check visibility of each selected dependency package.
2723 for atoms in rdepend.values():
# Optionally drop USE deps before selection (ignore_use path).
2726 atom = atom.without_use
2727 pkg, existing = self._select_package(
2729 if pkg is None or not self._pkg_visibility_check(pkg):
2734 def _get_dep_chain(self, start_node, target_atom=None,
2735 unsatisfied_dependency=False):
2737 Returns a list of (atom, node_type) pairs that represent a dep chain.
2738 If target_atom is None, the first package shown is pkg's parent.
2739 If target_atom is not None the first package shown is pkg.
2740 If unsatisfied_dependency is True, the first parent is select who's
2741 dependency is not satisfied by 'pkg'. This is need for USE changes.
2742 (Does not support target_atom.)
# NOTE(review): sampled extraction — docstring quotes, the dep_chain /
# usedep / dep_strings / child initializers, the `node = start_node`
# seed and the final return are not visible here.
2744 traversed_nodes = set()
2748 all_parents = self._dynamic_config._parent_atoms
2749 graph = self._dynamic_config.digraph
# When a target atom is given, show start_node itself first, annotated
# with the USE flags that affect the target atom.
2751 if target_atom is not None and isinstance(node, Package):
2752 affecting_use = set()
2753 for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
2755 affecting_use.update(extract_affecting_use(
2756 node.metadata[dep_str], target_atom,
2757 eapi=node.metadata["EAPI"]))
2758 except InvalidDependString:
2759 if not node.installed:
# Don't show flags the user can't change (masked/forced).
2761 affecting_use.difference_update(node.use.mask, node.use.force)
2762 pkg_name = _unicode_decode("%s") % (node.cpv,)
2765 for flag in affecting_use:
2766 if flag in self._pkg_use_enabled(node):
2769 usedep.append("-"+flag)
2770 pkg_name += "[%s]" % ",".join(usedep)
2772 dep_chain.append((pkg_name, node.type_name))
2775 # To build a dep chain for the given package we take
2776 # "random" parents form the digraph, except for the
2777 # first package, because we want a parent that forced
2778 # the corresponding change (i.e '>=foo-2', instead 'foo').
2780 traversed_nodes.add(start_node)
2782 start_node_parent_atoms = {}
2783 for ppkg, patom in all_parents.get(node, []):
2784 # Get a list of suitable atoms. For use deps
2785 # (aka unsatisfied_dependency is not None) we
2786 # need that the start_node doesn't match the atom.
2787 if not unsatisfied_dependency or \
2788 not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
2789 start_node_parent_atoms.setdefault(patom, []).append(ppkg)
2791 if start_node_parent_atoms:
2792 # If there are parents in all_parents then use one of them.
2793 # If not, then this package got pulled in by an Arg and
2794 # will be correctly handled by the code that handles later
2795 # packages in the dep chain.
2796 best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
2799 for ppkg in start_node_parent_atoms[best_match]:
2801 if ppkg in self._dynamic_config._initial_arg_list:
2802 # Stop if reached the top level of the dep chain.
# Walk up the digraph from the current node until an argument (root)
# node or an untraversed-parent dead end is reached.
2805 while node is not None:
2806 traversed_nodes.add(node)
2808 if node not in graph:
2809 # The parent is not in the graph due to backtracking.
2812 elif isinstance(node, DependencyArg):
2813 if graph.parent_nodes(node):
2816 node_type = "argument"
2817 dep_chain.append((_unicode_decode("%s") % (node,), node_type))
2819 elif node is not start_node:
2820 for ppkg, patom in all_parents[child]:
2822 if child is start_node and unsatisfied_dependency and \
2823 InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
2824 # This atom is satisfied by child, there must be another atom.
2826 atom = patom.unevaluated_atom
# Determine which dep strings (DEPEND/RDEPEND/PDEPEND) pulled in the
# child, using edge priorities when the edge exists in the graph.
2830 priorities = graph.nodes[node][0].get(child)
2831 if priorities is None:
2832 # This edge comes from _parent_atoms and was not added to
2833 # the graph, and _parent_atoms does not contain priorities.
2834 dep_strings.add(node.metadata["DEPEND"])
2835 dep_strings.add(node.metadata["RDEPEND"])
2836 dep_strings.add(node.metadata["PDEPEND"])
2838 for priority in priorities:
2839 if priority.buildtime:
2840 dep_strings.add(node.metadata["DEPEND"])
2841 if priority.runtime:
2842 dep_strings.add(node.metadata["RDEPEND"])
2843 if priority.runtime_post:
2844 dep_strings.add(node.metadata["PDEPEND"])
2846 affecting_use = set()
2847 for dep_str in dep_strings:
2849 affecting_use.update(extract_affecting_use(
2850 dep_str, atom, eapi=node.metadata["EAPI"]))
2851 except InvalidDependString:
2852 if not node.installed:
2855 #Don't show flags as 'affecting' if the user can't change them,
2856 affecting_use.difference_update(node.use.mask, \
2859 pkg_name = _unicode_decode("%s") % (node.cpv,)
2862 for flag in affecting_use:
2863 if flag in self._pkg_use_enabled(node):
2866 usedep.append("-"+flag)
2867 pkg_name += "[%s]" % ",".join(usedep)
2869 dep_chain.append((pkg_name, node.type_name))
2871 # When traversing to parents, prefer arguments over packages
2872 # since arguments are root nodes. Never traverse the same
2873 # package twice, in order to prevent an infinite loop.
2875 selected_parent = None
2878 parent_unsatisfied = None
2880 for parent in self._dynamic_config.digraph.parent_nodes(node):
2881 if parent in traversed_nodes:
2883 if isinstance(parent, DependencyArg):
2886 if isinstance(parent, Package) and \
2887 parent.operation == "merge":
2888 parent_merge = parent
2889 if unsatisfied_dependency and node is start_node:
2890 # Make sure that pkg doesn't satisfy parent's dependency.
2891 # This ensures that we select the correct parent for use
2893 for ppkg, atom in all_parents[start_node]:
2895 atom_set = InternalPackageSet(initial_atoms=(atom,))
2896 if not atom_set.findAtomForPackage(start_node):
2897 parent_unsatisfied = parent
2900 selected_parent = parent
# Parent preference order: unsatisfied > in-merge-list > argument.
2902 if parent_unsatisfied is not None:
2903 selected_parent = parent_unsatisfied
2904 elif parent_merge is not None:
2905 # Prefer parent in the merge list (bug #354747).
2906 selected_parent = parent_merge
2907 elif parent_arg is not None:
2908 if self._dynamic_config.digraph.parent_nodes(parent_arg):
2909 selected_parent = parent_arg
2912 (_unicode_decode("%s") % (parent_arg,), "argument"))
2913 selected_parent = None
2915 node = selected_parent
2918 def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
# Render the dep chain for `pkg` as a single "#"-prefixed comment line,
# e.g. for writing next to autounmask config suggestions.
# NOTE(review): the display_list initializer is missing from this
# extraction.
2919 dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
2921 for node, node_type in dep_chain:
2922 if node_type == "argument":
2923 display_list.append("required by %s (argument)" % node)
2925 display_list.append("required by %s" % node)
2927 msg = "#" + ", ".join(display_list) + "\n"
# NOTE(review): this chunk appears to be a line-numbered paste with many
# source lines elided (gaps in the leading numbers) and indentation lost;
# code is kept verbatim below — restore from upstream before executing.
# Purpose: print a user-facing explanation of why `atom` could not be
# satisfied for `root` (masked packages, USE-dep mismatches, REQUIRED_USE
# violations, or no ebuilds at all). When check_backtrack or
# check_autounmask_breakage is True it raises a signal instead of printing.
2931 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
2932 check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
2934 When check_backtrack=True, no output is produced and
2935 the method either returns or raises _backtrack_mask if
2936 a matching package has been masked by backtracking.
2938 backtrack_mask = False
2939 autounmask_broke_use_dep = False
2940 atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
2942 atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
2944 xinfo = '"%s"' % atom.unevaluated_atom
2947 if isinstance(myparent, AtomArg):
2948 xinfo = _unicode_decode('"%s"') % (myparent,)
2949 # Discard null/ from failed cpv_expand category expansion.
2950 xinfo = xinfo.replace("null/", "")
2951 if root != self._frozen_config._running_root.root:
2952 xinfo = "%s for %s" % (xinfo, root)
2953 masked_packages = []
2955 missing_use_adjustable = set()
2956 required_use_unsatisfied = []
2957 masked_pkg_instances = set()
2958 have_eapi_mask = False
2959 pkgsettings = self._frozen_config.pkgsettings[root]
2960 root_config = self._frozen_config.roots[root]
2961 portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
2962 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
2963 bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
2964 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) and collect candidate packages
# together with their mask reasons for the report below.
2965 for db, pkg_type, built, installed, db_keys in dbs:
2968 if hasattr(db, "xmatch"):
2969 cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
2971 cpv_list = db.match(atom.without_use)
2973 if atom.repo is None and hasattr(db, "getRepositories"):
2974 repo_list = db.getRepositories()
2976 repo_list = [atom.repo]
2980 for cpv in cpv_list:
2981 for repo in repo_list:
2982 if not db.cpv_exists(cpv, myrepo=repo):
2985 metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
2986 built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
2987 if metadata is not None and \
2988 portage.eapi_is_supported(metadata["EAPI"]):
2990 repo = metadata.get('repository')
2991 pkg = self._pkg(cpv, pkg_type, root_config,
2992 installed=installed, myrepo=repo)
2993 # pkg.metadata contains calculated USE for ebuilds,
2994 # required later for getMissingLicenses.
2995 metadata = pkg.metadata
2997 # Avoid doing any operations with packages that
2998 # have invalid metadata. It would be unsafe at
2999 # least because it could trigger unhandled
3000 # exceptions in places like check_required_use().
3001 masked_packages.append(
3002 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
3004 if not atom_set.findAtomForPackage(pkg,
3005 modified_use=self._pkg_use_enabled(pkg)):
3007 if pkg in self._dynamic_config._runtime_pkg_mask:
3008 backtrack_reasons = \
3009 self._dynamic_config._runtime_pkg_mask[pkg]
3010 mreasons.append('backtracking: %s' % \
3011 ', '.join(sorted(backtrack_reasons)))
3012 backtrack_mask = True
3013 if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3014 modified_use=self._pkg_use_enabled(pkg)):
3015 mreasons = ["exclude option"]
3017 masked_pkg_instances.add(pkg)
3018 if atom.unevaluated_atom.use:
3020 if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
3021 or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
3022 missing_use.append(pkg)
3023 if atom_set_with_use.findAtomForPackage(pkg):
3024 autounmask_broke_use_dep = True
3028 writemsg("violated_conditionals raised " + \
3029 "InvalidAtom: '%s' parent: %s" % \
3030 (atom, myparent), noiselevel=-1)
3032 if not mreasons and \
3034 pkg.metadata.get("REQUIRED_USE") and \
3035 eapi_has_required_use(pkg.metadata["EAPI"]):
3036 if not check_required_use(
3037 pkg.metadata["REQUIRED_USE"],
3038 self._pkg_use_enabled(pkg),
3039 pkg.iuse.is_valid_flag):
3040 required_use_unsatisfied.append(pkg)
3042 root_slot = (pkg.root, pkg.slot_atom)
3043 if pkg.built and root_slot in self._rebuild.rebuild_list:
3044 mreasons = ["need to rebuild from source"]
3045 elif pkg.installed and root_slot in self._rebuild.reinstall_list:
3046 mreasons = ["need to rebuild from source"]
3047 elif pkg.built and not mreasons:
3048 mreasons = ["use flag configuration mismatch"]
3049 masked_packages.append(
3050 (root_config, pkgsettings, cpv, repo, metadata, mreasons))
# check_backtrack / check_autounmask_breakage callers want only the
# raised signal, not printed output.
3054 raise self._backtrack_mask()
3058 if check_autounmask_breakage:
3059 if autounmask_broke_use_dep:
3060 raise self._autounmask_breakage()
# Build per-package "Missing IUSE" / "Change USE" suggestions.
3064 missing_use_reasons = []
3065 missing_iuse_reasons = []
3066 for pkg in missing_use:
3067 use = self._pkg_use_enabled(pkg)
3069 #Use the unevaluated atom here, because some flags might have gone
3070 #lost during evaluation.
3071 required_flags = atom.unevaluated_atom.use.required
3072 missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
3076 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3077 missing_iuse_reasons.append((pkg, mreasons))
3079 need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
3080 need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
3082 untouchable_flags = \
3083 frozenset(chain(pkg.use.mask, pkg.use.force))
3084 if any(x in untouchable_flags for x in
3085 chain(need_enable, need_disable)):
3088 missing_use_adjustable.add(pkg)
3089 required_use = pkg.metadata.get("REQUIRED_USE")
3090 required_use_warning = ""
3092 old_use = self._pkg_use_enabled(pkg)
3093 new_use = set(self._pkg_use_enabled(pkg))
3094 for flag in need_enable:
3096 for flag in need_disable:
3097 new_use.discard(flag)
3098 if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3099 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
3100 required_use_warning = ", this change violates use flag constraints " + \
3101 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
3103 if need_enable or need_disable:
3105 changes.extend(colorize("red", "+" + x) \
3106 for x in need_enable)
3107 changes.extend(colorize("blue", "-" + x) \
3108 for x in need_disable)
3109 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3110 missing_use_reasons.append((pkg, mreasons))
# When violated use deps are conditional on the parent's USE, suggest a
# USE change on the parent instead of the child.
3112 if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
3113 # Lets see if the violated use deps are conditional.
3114 # If so, suggest to change them on the parent.
3116 # If the child package is masked then a change to
3117 # parent USE is not a valid solution (a normal mask
3118 # message should be displayed instead).
3119 if pkg in masked_pkg_instances:
3123 violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
3124 pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
3125 if not (violated_atom.use.enabled or violated_atom.use.disabled):
3126 #all violated use deps are conditional
3128 conditional = violated_atom.use.conditional
3129 involved_flags = set(chain(conditional.equal, conditional.not_equal, \
3130 conditional.enabled, conditional.disabled))
3132 untouchable_flags = \
3133 frozenset(chain(myparent.use.mask, myparent.use.force))
3134 if any(x in untouchable_flags for x in involved_flags):
3137 required_use = myparent.metadata.get("REQUIRED_USE")
3138 required_use_warning = ""
3140 old_use = self._pkg_use_enabled(myparent)
3141 new_use = set(self._pkg_use_enabled(myparent))
3142 for flag in involved_flags:
3144 new_use.discard(flag)
3147 if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
3148 not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
3149 required_use_warning = ", this change violates use flag constraints " + \
3150 "defined by %s: '%s'" % (myparent.cpv, \
3151 human_readable_required_use(required_use))
3153 for flag in involved_flags:
3154 if flag in self._pkg_use_enabled(myparent):
3155 changes.append(colorize("blue", "-" + flag))
3157 changes.append(colorize("red", "+" + flag))
3158 mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
3159 if (myparent, mreasons) not in missing_use_reasons:
3160 missing_use_reasons.append((myparent, mreasons))
3162 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3163 in missing_use_reasons if pkg not in masked_pkg_instances]
3165 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3166 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3168 show_missing_use = False
3169 if unmasked_use_reasons:
3170 # Only show the latest version.
3171 show_missing_use = []
3173 parent_reason = None
3174 for pkg, mreasons in unmasked_use_reasons:
3176 if parent_reason is None:
3177 #This happens if a use change on the parent
3178 #leads to a satisfied conditional use dep.
3179 parent_reason = (pkg, mreasons)
3180 elif pkg_reason is None:
3181 #Don't rely on the first pkg in unmasked_use_reasons,
3182 #being the highest version of the dependency.
3183 pkg_reason = (pkg, mreasons)
3185 show_missing_use.append(pkg_reason)
3187 show_missing_use.append(parent_reason)
3189 elif unmasked_iuse_reasons:
3190 masked_with_iuse = False
3191 for pkg in masked_pkg_instances:
3192 #Use atom.unevaluated here, because some flags might have gone
3193 #lost during evaluation.
3194 if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3195 # Package(s) with required IUSE are masked,
3196 # so display a normal masking message.
3197 masked_with_iuse = True
3199 if not masked_with_iuse:
3200 show_missing_use = unmasked_iuse_reasons
3202 if required_use_unsatisfied:
3203 # If there's a higher unmasked version in missing_use_adjustable
3204 # then we want to show that instead.
3205 for pkg in missing_use_adjustable:
3206 if pkg not in masked_pkg_instances and \
3207 pkg > required_use_unsatisfied[0]:
3208 required_use_unsatisfied = False
3213 if show_req_use is None and required_use_unsatisfied:
3214 # We have an unmasked package that only requires USE adjustment
3215 # in order to satisfy REQUIRED_USE, and nothing more. We assume
3216 # that the user wants the latest version, so only the first
3217 # instance is displayed.
3218 show_req_use = required_use_unsatisfied[0]
# Output phase: one of REQUIRED_USE violation, USE suggestions, masked
# package listing, or a plain "no ebuilds" message.
3220 if show_req_use is not None:
3223 output_cpv = pkg.cpv + _repo_separator + pkg.repo
3224 writemsg("\n!!! " + \
3225 colorize("BAD", "The ebuild selected to satisfy ") + \
3226 colorize("INFORM", xinfo) + \
3227 colorize("BAD", " has unmet requirements.") + "\n",
3229 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
3230 writemsg("- %s %s\n" % (output_cpv, use_display),
3232 writemsg("\n The following REQUIRED_USE flag constraints " + \
3233 "are unsatisfied:\n", noiselevel=-1)
3234 reduced_noise = check_required_use(
3235 pkg.metadata["REQUIRED_USE"],
3236 self._pkg_use_enabled(pkg),
3237 pkg.iuse.is_valid_flag).tounicode()
3238 writemsg(" %s\n" % \
3239 human_readable_required_use(reduced_noise),
3241 normalized_required_use = \
3242 " ".join(pkg.metadata["REQUIRED_USE"].split())
3243 if reduced_noise != normalized_required_use:
3244 writemsg("\n The above constraints " + \
3245 "are a subset of the following complete expression:\n",
3247 writemsg(" %s\n" % \
3248 human_readable_required_use(normalized_required_use),
3250 writemsg("\n", noiselevel=-1)
3252 elif show_missing_use:
3253 writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
3254 writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
3255 for pkg, mreasons in show_missing_use:
3256 writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
3258 elif masked_packages:
3259 writemsg("\n!!! " + \
3260 colorize("BAD", "All ebuilds that could satisfy ") + \
3261 colorize("INFORM", xinfo) + \
3262 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
3263 writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
3264 have_eapi_mask = show_masked_packages(masked_packages)
3266 writemsg("\n", noiselevel=-1)
3267 msg = ("The current version of portage supports " + \
3268 "EAPI '%s'. You must upgrade to a newer version" + \
3269 " of portage before EAPI masked packages can" + \
3270 " be installed.") % portage.const.EAPI
3271 writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
3272 writemsg("\n", noiselevel=-1)
3276 if not atom.cp.startswith("null/"):
3277 for pkg in self._iter_match_pkgs_any(
3278 root_config, Atom(atom.cp)):
3282 writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
# Misspelling suggestions: difflib close-matches against all known
# category/package names from vardb (+ portdb / bindb per options).
3283 if isinstance(myparent, AtomArg) and \
3285 self._frozen_config.myopts.get(
3286 "--misspell-suggestions", "y") != "n":
3287 cp = myparent.atom.cp.lower()
3288 cat, pkg = portage.catsplit(cp)
3292 writemsg("\nemerge: searching for similar names..."
3296 all_cp.update(vardb.cp_all())
3297 if "--usepkgonly" not in self._frozen_config.myopts:
3298 all_cp.update(portdb.cp_all())
3299 if "--usepkg" in self._frozen_config.myopts:
3300 all_cp.update(bindb.cp_all())
3301 # discard dir containing no ebuilds
3305 for cp_orig in all_cp:
3306 orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
3307 all_cp = set(orig_cp_map)
3310 matches = difflib.get_close_matches(cp, all_cp)
3313 for other_cp in list(all_cp):
3314 other_pkg = portage.catsplit(other_cp)[1]
3315 if other_pkg == pkg:
3316 # Check for non-identical package that
3317 # differs only by upper/lower case.
3319 for cp_orig in orig_cp_map[other_cp]:
3320 if portage.catsplit(cp_orig)[1] != \
3321 portage.catsplit(atom.cp)[1]:
3325 # discard dir containing no ebuilds
3326 all_cp.discard(other_cp)
3328 pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
3329 pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
3331 for pkg_match in pkg_matches:
3332 matches.extend(pkg_to_cp[pkg_match])
3334 matches_orig_case = []
3336 matches_orig_case.extend(orig_cp_map[cp])
3337 matches = matches_orig_case
3339 if len(matches) == 1:
3340 writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
3342 elif len(matches) > 1:
3344 "\nemerge: Maybe you meant any of these: %s?\n" % \
3345 (", ".join(matches),), noiselevel=-1)
3347 # Generally, this would only happen if
3348 # all dbapis are empty.
3349 writemsg(" nothing similar found.\n"
3352 if not isinstance(myparent, AtomArg):
3353 # It's redundant to show parent for AtomArg since
3354 # it's the same as 'xinfo' displayed above.
3355 dep_chain = self._get_dep_chain(myparent, atom)
3356 for node, node_type in dep_chain:
3357 msg.append('(dependency required by "%s" [%s])' % \
3358 (colorize('INFORM', _unicode_decode("%s") % \
3359 (node)), node_type))
3362 writemsg("\n".join(msg), noiselevel=-1)
3363 writemsg("\n", noiselevel=-1)
3367 writemsg("\n", noiselevel=-1)
3369 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
3370 for db, pkg_type, built, installed, db_keys in \
3371 self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
3372 for pkg in self._iter_match_pkgs(root_config,
3373 pkg_type, atom, onlydeps=onlydeps):
# NOTE(review): line-numbered paste with elided lines (gaps in the leading
# numbers); kept verbatim — restore from upstream before executing.
# Purpose: yield Package instances of pkg_type matching `atom`. Per the
# docstring below, visibility is NOT checked here and USE deps are not
# matched for unbuilt ebuilds.
3376 def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
3378 Iterate over Package instances of pkg_type matching the given atom.
3379 This does not check visibility and it also does not match USE for
3380 unbuilt ebuilds since USE are lazily calculated after visibility
3381 checks (to avoid the expense when possible).
3384 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
3385 atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
3386 cp_list = db.cp_list(atom_exp.cp)
3387 matched_something = False
3388 installed = pkg_type == 'installed'
3391 atom_set = InternalPackageSet(initial_atoms=(atom,),
3393 if atom.repo is None and hasattr(db, "getRepositories"):
3394 repo_list = db.getRepositories()
3396 repo_list = [atom.repo]
# Main match loop: per-cpv match_from_list, then per-repo Package
# construction and a full atom_set check against cached metadata.
3401 # Call match_from_list on one cpv at a time, in order
3402 # to avoid unnecessary match_from_list comparisons on
3403 # versions that are never yielded from this method.
3404 if not match_from_list(atom_exp, [cpv]):
3406 for repo in repo_list:
3409 pkg = self._pkg(cpv, pkg_type, root_config,
3410 installed=installed, onlydeps=onlydeps, myrepo=repo)
3411 except portage.exception.PackageNotFound:
3414 # A cpv can be returned from dbapi.match() as an
3415 # old-style virtual match even in cases when the
3416 # package does not actually PROVIDE the virtual.
3417 # Filter out any such false matches here.
3419 # Make sure that cpv from the current repo satisfies the atom.
3420 # This might not be the case if there are several repos with
3421 # the same cpv, but different metadata keys, like SLOT.
3422 # Also, parts of the match that require metadata access
3423 # are deferred until we have cached the metadata in a
3425 if not atom_set.findAtomForPackage(pkg,
3426 modified_use=self._pkg_use_enabled(pkg)):
3428 matched_something = True
# Fallback for installed packages with slot deps: per the comment below,
# USE=multislot can make the installed slot look wrong, so treat the
# slot dep as satisfied when no db offers the wanted slot.
3431 # USE=multislot can make an installed package appear as if
3432 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3433 # won't do any good as long as USE=multislot is enabled since
3434 # the newly built package still won't have the expected slot.
3435 # Therefore, assume that such SLOT dependencies are already
3436 # satisfied rather than forcing a rebuild.
3437 if not matched_something and installed and atom.slot is not None:
3439 if "remove" in self._dynamic_config.myparams:
3440 # We need to search the portdbapi, which is not in our
3441 # normal dbs list, in order to find the real SLOT.
3442 portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
3443 db_keys = list(portdb._aux_cache_keys)
3444 dbs = [(portdb, "ebuild", False, False, db_keys)]
3446 dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
3448 cp_list = db.cp_list(atom_exp.cp)
3450 atom_set = InternalPackageSet(
3451 initial_atoms=(atom.without_slot,), allow_repo=True)
3452 atom_exp_without_slot = atom_exp.without_slot
3455 if not match_from_list(atom_exp_without_slot, [cpv]):
3457 slot_available = False
3458 for other_db, other_type, other_built, \
3459 other_installed, other_keys in dbs:
3462 other_db.aux_get(cpv, ["SLOT"])[0]:
3463 slot_available = True
3467 if not slot_available:
3469 inst_pkg = self._pkg(cpv, "installed",
3470 root_config, installed=installed, myrepo=atom.repo)
3471 # Remove the slot from the atom and verify that
3472 # the package matches the resulting atom.
3473 if atom_set.findAtomForPackage(inst_pkg):
# NOTE(review): line-numbered paste with elided lines (e.g. the cache-hit
# unpack and return statements are missing); kept verbatim — restore from
# upstream before executing.
# Purpose: memoizing wrapper around _select_pkg_highest_available_imp,
# keyed on (root, atom, atom.unevaluated_atom, onlydeps).
3477 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3478 cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
3479 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
# Cache-hit path: refresh `existing` from the slot map so the cached
# entry reflects a package that has since entered the graph.
3482 if pkg and not existing:
3483 existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
3484 if existing and existing == pkg:
3485 # Update the cache to reflect that the
3486 # package has been added to the graph.
3488 self._dynamic_config._highest_pkg_cache[cache_key] = ret
# Cache-miss path: compute, cache, and record visibility.
3490 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3491 self._dynamic_config._highest_pkg_cache[cache_key] = ret
3494 if self._pkg_visibility_check(pkg) and \
3495 not (pkg.installed and pkg.masks):
3496 self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
# NOTE(review): line-numbered paste with elided lines (the return
# statements and `try:` line are missing); kept verbatim — restore from
# upstream before executing.
3499 def _want_installed_pkg(self, pkg):
3501 Given an installed package returned from select_pkg, return
3502 True if the user has not explicitly requested for this package
3503 to be replaced (typically via an atom on the command line).
3505 if "selective" not in self._dynamic_config.myparams and \
3506 pkg.root == self._frozen_config.target_root:
# Packages matched by --exclude are kept (not replaced).
3507 if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
3508 modified_use=self._pkg_use_enabled(pkg)):
# Probe whether any argument atom matches this package; StopIteration
# means no argument requested it.
3511 next(self._iter_atoms_for_pkg(pkg))
3512 except StopIteration:
3514 except portage.exception.InvalidDependString:
3520 class _AutounmaskLevel(object):
3521 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
3522 "allow_missing_keywords", "allow_unmasks")
3525 self.allow_use_changes = False
3526 self.allow_license_changes = False
3527 self.allow_unstable_keywords = False
3528 self.allow_missing_keywords = False
3529 self.allow_unmasks = False
# NOTE(review): line-numbered paste with elided lines (e.g. the early
# `return` body and the first `yield` appear to be missing); kept
# verbatim — restore from upstream before executing.
# Purpose: generator yielding progressively more invasive
# _AutounmaskLevel configurations, least invasive first.
3531 def _autounmask_levels(self):
3533 Iterate over the different allowed things to unmask.
3536 2. USE + ~arch + license
3537 3. USE + ~arch + license + missing keywords
3538 4. USE + ~arch + license + masks
3539 5. USE + ~arch + license + missing keywords + masks
3542 * Do least invasive changes first.
3543 * Try unmasking alone before unmasking + missing keywords
3544 to avoid -9999 versions if possible
# Only active when autounmask is fully enabled.
3547 if self._dynamic_config._autounmask is not True:
3550 autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
3551 autounmask_level = self._AutounmaskLevel()
3553 autounmask_level.allow_use_changes = True
# NOTE(review): the same level object is mutated and re-yielded across
# iterations rather than copied.
3555 for only_use_changes in (True, False):
3557 autounmask_level.allow_unstable_keywords = (not only_use_changes)
3558 autounmask_level.allow_license_changes = (not only_use_changes)
3560 for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
# Skip keyword/mask combinations when only USE changes are allowed or
# --autounmask-keep-masks is set.
3562 if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
3565 autounmask_level.allow_missing_keywords = missing_keyword
3566 autounmask_level.allow_unmasks = unmask
3568 yield autounmask_level
# NOTE(review): line-numbered paste with elided lines (the condition
# joining 3577/3579 and parts of the autounmask retry loop are missing);
# kept verbatim — restore from upstream before executing.
# Purpose: select the best available package for (root, atom), retrying
# with progressively more permissive autounmask levels when the plain
# selection fails.
3571 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3572 pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3574 default_selection = (pkg, existing)
3577 if pkg is not None and \
3579 not self._want_installed_pkg(pkg):
3582 if self._dynamic_config._autounmask is True:
# Retry the selection once per autounmask level until something is found.
3585 for autounmask_level in self._autounmask_levels():
3590 self._wrapped_select_pkg_highest_available_imp(
3591 root, atom, onlydeps=onlydeps,
3592 autounmask_level=autounmask_level)
3596 if self._dynamic_config._need_restart:
3600 # This ensures that we can fall back to an installed package
3601 # that may have been rejected in the autounmask path above.
3602 return default_selection
3604 return pkg, existing
# NOTE(review): line-numbered paste with elided lines (the `return`
# statements are missing); kept verbatim — restore from upstream before
# executing.
# Purpose: decide whether pkg may be used, and when autounmask_level
# permits, record the keyword/mask/license changes needed to make it
# visible (also mirrored into _backtrack_infos for the backtracker).
3606 def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
3611 if trust_graph and pkg in self._dynamic_config.digraph:
3612 # Sometimes we need to temporarily disable
3613 # dynamic_config._autounmask, but for overall
3614 # consistency in dependency resolution, in most
3615 # cases we want to treat packages in the graph
3616 # as though they are visible.
3619 if not self._dynamic_config._autounmask or autounmask_level is None:
3622 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
3623 root_config = self._frozen_config.roots[pkg.root]
3624 mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
# Classify each mask reason by its unmask hint.
3626 masked_by_unstable_keywords = False
3627 masked_by_missing_keywords = False
3628 missing_licenses = None
3629 masked_by_something_else = False
3630 masked_by_p_mask = False
3632 for reason in mreasons:
3633 hint = reason.unmask_hint
3636 masked_by_something_else = True
3637 elif hint.key == "unstable keyword":
3638 masked_by_unstable_keywords = True
3639 if hint.value == "**":
3640 masked_by_missing_keywords = True
3641 elif hint.key == "p_mask":
3642 masked_by_p_mask = True
3643 elif hint.key == "license":
3644 missing_licenses = hint.value
3646 masked_by_something_else = True
# Masks that autounmask cannot lift abort the check.
3648 if masked_by_something_else:
3651 if pkg in self._dynamic_config._needed_unstable_keywords:
3652 #If the package is already keyworded, remove the mask.
3653 masked_by_unstable_keywords = False
3654 masked_by_missing_keywords = False
3656 if pkg in self._dynamic_config._needed_p_mask_changes:
3657 #If the package is already keyworded, remove the mask.
3658 masked_by_p_mask = False
3660 if missing_licenses:
3661 #If the needed licenses are already unmasked, remove the mask.
3662 missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
3664 if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
3665 #Package has already been unmasked.
# Each remaining mask kind must be permitted by the autounmask level.
3668 if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
3669 (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
3670 (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
3671 (missing_licenses and not autounmask_level.allow_license_changes):
3672 #We are not allowed to do the needed changes.
# Record the needed changes, both in dynamic config and in
# _backtrack_infos["config"] so backtracking can replay them.
3675 if masked_by_unstable_keywords:
3676 self._dynamic_config._needed_unstable_keywords.add(pkg)
3677 backtrack_infos = self._dynamic_config._backtrack_infos
3678 backtrack_infos.setdefault("config", {})
3679 backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
3680 backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
3682 if masked_by_p_mask:
3683 self._dynamic_config._needed_p_mask_changes.add(pkg)
3684 backtrack_infos = self._dynamic_config._backtrack_infos
3685 backtrack_infos.setdefault("config", {})
3686 backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
3687 backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
3689 if missing_licenses:
3690 self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
3691 backtrack_infos = self._dynamic_config._backtrack_infos
3692 backtrack_infos.setdefault("config", {})
3693 backtrack_infos["config"].setdefault("needed_license_changes", set())
3694 backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
# NOTE(review): line-numbered paste with elided lines (several `return`s,
# the `new_use` initialization, and branch bodies are missing); kept
# verbatim — restore from upstream before executing.
# Purpose: per the docstring below, return pkg's effective USE including
# pending autounmask USE changes; with target_use given, compute and
# record the changes needed to reach that configuration.
3698 def _pkg_use_enabled(self, pkg, target_use=None):
3700 If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
3701 If target_use is given, the need changes are computed to make the package useable.
3702 Example: target_use = { "foo": True, "bar": False }
3703 The flags target_use must be in the pkg's IUSE.
3706 return pkg.use.enabled
3707 needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
3709 if target_use is None:
3710 if needed_use_config_change is None:
3711 return pkg.use.enabled
3713 return needed_use_config_change[0]
# Start from any previously recorded change set, else from pkg's USE.
3715 if needed_use_config_change is not None:
3716 old_use = needed_use_config_change[0]
3718 old_changes = needed_use_config_change[1]
3719 new_changes = old_changes.copy()
3721 old_use = pkg.use.enabled
# Merge target_use into the change map, flag by flag.
3726 for flag, state in target_use.items():
3728 if flag not in old_use:
3729 if new_changes.get(flag) == False:
3731 new_changes[flag] = True
3735 if new_changes.get(flag) == True:
3737 new_changes[flag] = False
3738 new_use.update(old_use.difference(target_use))
# Local helper: restart is needed when the USE change alters dep strings
# or a parent atom's use-dep involves a changed flag.
3740 def want_restart_for_use_change(pkg, new_use):
3741 if pkg not in self._dynamic_config.digraph.nodes:
3744 for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
3745 dep = pkg.metadata[key]
3746 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3747 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
3749 if old_val != new_val:
3752 parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
3753 if not parent_atoms:
3756 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
3757 for ppkg, atom in parent_atoms:
3758 if not atom.use or \
3759 not any(x in atom.use.required for x in changes):
# Apply the change only if it keeps REQUIRED_USE satisfied and touches
# no masked/forced flags; then record it for backtracking.
3766 if new_changes != old_changes:
3767 #Don't do the change if it violates REQUIRED_USE.
3768 required_use = pkg.metadata.get("REQUIRED_USE")
3769 if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
3770 not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
3773 if any(x in pkg.use.mask for x in new_changes) or \
3774 any(x in pkg.use.force for x in new_changes):
3777 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
3778 backtrack_infos = self._dynamic_config._backtrack_infos
3779 backtrack_infos.setdefault("config", {})
3780 backtrack_infos["config"].setdefault("needed_use_config_changes", [])
3781 backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
3782 if want_restart_for_use_change(pkg, new_use):
3783 self._dynamic_config._need_restart = True
3786 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
3787 root_config = self._frozen_config.roots[root]
3788 pkgsettings = self._frozen_config.pkgsettings[root]
3789 dbs = self._dynamic_config._filtered_trees[root]["dbs"]
3790 vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
3791 # List of acceptable packages, ordered by type preference.
3792 matched_packages = []
3793 matched_pkgs_ignore_use = []
3794 highest_version = None
3795 if not isinstance(atom, portage.dep.Atom):
3796 atom = portage.dep.Atom(atom)
3798 have_new_virt = atom_cp.startswith("virtual/") and \
3799 self._have_new_virt(root, atom_cp)
3800 atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
3801 existing_node = None
3803 rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
3804 usepkg = "--usepkg" in self._frozen_config.myopts
3805 usepkgonly = "--usepkgonly" in self._frozen_config.myopts
3806 empty = "empty" in self._dynamic_config.myparams
3807 selective = "selective" in self._dynamic_config.myparams
3809 avoid_update = "--update" not in self._frozen_config.myopts
3810 dont_miss_updates = "--update" in self._frozen_config.myopts
3811 use_ebuild_visibility = self._frozen_config.myopts.get(
3812 '--use-ebuild-visibility', 'n') != 'n'
3813 reinstall_atoms = self._frozen_config.reinstall_atoms
3814 usepkg_exclude = self._frozen_config.usepkg_exclude
3815 useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
3817 # Behavior of the "selective" parameter depends on
3818 # whether or not a package matches an argument atom.
3819 # If an installed package provides an old-style
3820 # virtual that is no longer provided by an available
3821 # package, the installed package may match an argument
3822 # atom even though none of the available packages do.
3823 # Therefore, "selective" logic does not consider
3824 # whether or not an installed package matches an
3825 # argument atom. It only considers whether or not
3826 # available packages match argument atoms, which is
3827 # represented by the found_available_arg flag.
3828 found_available_arg = False
3829 packages_with_invalid_use_config = []
3830 for find_existing_node in True, False:
3833 for db, pkg_type, built, installed, db_keys in dbs:
3836 if installed and not find_existing_node:
3837 want_reinstall = reinstall or empty or \
3838 (found_available_arg and not selective)
3839 if want_reinstall and matched_packages:
3842 # Ignore USE deps for the initial match since we want to
3843 # ensure that updates aren't missed solely due to the user's
3844 # USE configuration.
3845 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
3847 if pkg.cp != atom_cp and have_new_virt:
3848 # pull in a new-style virtual instead
3850 if pkg in self._dynamic_config._runtime_pkg_mask:
3851 # The package has been masked by the backtracking logic
3853 root_slot = (pkg.root, pkg.slot_atom)
3854 if pkg.built and root_slot in self._rebuild.rebuild_list:
3856 if (pkg.installed and
3857 root_slot in self._rebuild.reinstall_list):
3860 if not pkg.installed and \
3861 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
3862 modified_use=self._pkg_use_enabled(pkg)):
3865 if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
3866 modified_use=self._pkg_use_enabled(pkg)):
3869 useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
3870 modified_use=self._pkg_use_enabled(pkg))
3872 if packages_with_invalid_use_config and (not built or not useoldpkg) and \
3873 (not pkg.installed or dont_miss_updates):
3874 # Check if a higher version was rejected due to user
3875 # USE configuration. The packages_with_invalid_use_config
3876 # list only contains unbuilt ebuilds since USE can't
3877 # be changed for built packages.
3878 higher_version_rejected = False
3879 repo_priority = pkg.repo_priority
3880 for rejected in packages_with_invalid_use_config:
3881 if rejected.cp != pkg.cp:
3884 higher_version_rejected = True
3886 if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
3887 # If version is identical then compare
3888 # repo priority (see bug #350254).
3889 rej_repo_priority = rejected.repo_priority
3890 if rej_repo_priority is not None and \
3891 (repo_priority is None or
3892 rej_repo_priority > repo_priority):
3893 higher_version_rejected = True
3895 if higher_version_rejected:
3899 reinstall_for_flags = None
3901 if not pkg.installed or \
3902 (matched_packages and not avoid_update):
3903 # Only enforce visibility on installed packages
3904 # if there is at least one other visible package
3905 # available. By filtering installed masked packages
3906 # here, packages that have been masked since they
3907 # were installed can be automatically downgraded
3908 # to an unmasked version. NOTE: This code needs to
3909 # be consistent with masking behavior inside
3910 # _dep_check_composite_db, in order to prevent
3911 # incorrect choices in || deps like bug #351828.
3913 if not self._pkg_visibility_check(pkg, autounmask_level):
3916 # Enable upgrade or downgrade to a version
3917 # with visible KEYWORDS when the installed
3918 # version is masked by KEYWORDS, but never
3919 # reinstall the same exact version only due
3920 # to a KEYWORDS mask. See bug #252167.
3922 if pkg.type_name != "ebuild" and matched_packages:
3923 # Don't re-install a binary package that is
3924 # identical to the currently installed package
3925 # (see bug #354441).
3926 identical_binary = False
3927 if usepkg and pkg.installed:
3928 for selected_pkg in matched_packages:
3929 if selected_pkg.type_name == "binary" and \
3930 selected_pkg.cpv == pkg.cpv and \
3931 selected_pkg.metadata.get('BUILD_TIME') == \
3932 pkg.metadata.get('BUILD_TIME'):
3933 identical_binary = True
3936 if not identical_binary:
3937 # If the ebuild no longer exists or it's
3938 # keywords have been dropped, reject built
3939 # instances (installed or binary).
3940 # If --usepkgonly is enabled, assume that
3941 # the ebuild status should be ignored.
3942 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
3943 if pkg.installed and pkg.masks:
3948 pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
3949 except portage.exception.PackageNotFound:
3950 pkg_eb_visible = False
3951 for pkg_eb in self._iter_match_pkgs(pkg.root_config,
3952 "ebuild", Atom("=%s" % (pkg.cpv,))):
3953 if self._pkg_visibility_check(pkg_eb, autounmask_level):
3954 pkg_eb_visible = True
3956 if not pkg_eb_visible:
3959 if not self._pkg_visibility_check(pkg_eb, autounmask_level):
3962 # Calculation of USE for unbuilt ebuilds is relatively
3963 # expensive, so it is only performed lazily, after the
3964 # above visibility checks are complete.
3967 if root == self._frozen_config.target_root:
3969 myarg = next(self._iter_atoms_for_pkg(pkg))
3970 except StopIteration:
3972 except portage.exception.InvalidDependString:
3974 # masked by corruption
3976 if not installed and myarg:
3977 found_available_arg = True
3979 if atom.unevaluated_atom.use:
3980 #Make sure we don't miss a 'missing IUSE'.
3981 if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
3982 # Don't add this to packages_with_invalid_use_config
3983 # since IUSE cannot be adjusted by the user.
3988 matched_pkgs_ignore_use.append(pkg)
3989 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
3991 for flag in atom.use.enabled:
3992 target_use[flag] = True
3993 for flag in atom.use.disabled:
3994 target_use[flag] = False
3995 use = self._pkg_use_enabled(pkg, target_use)
3997 use = self._pkg_use_enabled(pkg)
4000 can_adjust_use = not pkg.built
4001 missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
4002 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
4004 if atom.use.enabled:
4005 if any(x in atom.use.enabled for x in missing_disabled):
4007 can_adjust_use = False
4008 need_enabled = atom.use.enabled.difference(use)
4010 need_enabled = need_enabled.difference(missing_enabled)
4014 if any(x in pkg.use.mask for x in need_enabled):
4015 can_adjust_use = False
4017 if atom.use.disabled:
4018 if any(x in atom.use.disabled for x in missing_enabled):
4020 can_adjust_use = False
4021 need_disabled = atom.use.disabled.intersection(use)
4023 need_disabled = need_disabled.difference(missing_disabled)
4027 if any(x in pkg.use.force and x not in
4028 pkg.use.mask for x in need_disabled):
4029 can_adjust_use = False
4033 # Above we must ensure that this package has
4034 # absolutely no use.force, use.mask, or IUSE
4035 # issues that the user typically can't make
4036 # adjustments to solve (see bug #345979).
4037 # FIXME: Conditional USE deps complicate
4038 # issues. This code currently excludes cases
4039 # in which the user can adjust the parent
4040 # package's USE in order to satisfy the dep.
4041 packages_with_invalid_use_config.append(pkg)
4044 if pkg.cp == atom_cp:
4045 if highest_version is None:
4046 highest_version = pkg
4047 elif pkg > highest_version:
4048 highest_version = pkg
4049 # At this point, we've found the highest visible
4050 # match from the current repo. Any lower versions
4051 # from this repo are ignored, so this so the loop
4052 # will always end with a break statement below
4054 if find_existing_node:
4055 e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4059 # Use PackageSet.findAtomForPackage()
4060 # for PROVIDE support.
4061 if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
4062 if highest_version and \
4063 e_pkg.cp == atom_cp and \
4064 e_pkg < highest_version and \
4065 e_pkg.slot_atom != highest_version.slot_atom:
4066 # There is a higher version available in a
4067 # different slot, so this existing node is
4071 matched_packages.append(e_pkg)
4072 existing_node = e_pkg
4074 # Compare built package to current config and
4075 # reject the built package if necessary.
4076 if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
4077 ("--newuse" in self._frozen_config.myopts or \
4078 "--reinstall" in self._frozen_config.myopts or \
4079 (not installed and self._dynamic_config.myparams.get(
4080 "binpkg_respect_use") in ("y", "auto"))):
4081 iuses = pkg.iuse.all
4082 old_use = self._pkg_use_enabled(pkg)
4084 pkgsettings.setcpv(myeb)
4086 pkgsettings.setcpv(pkg)
4087 now_use = pkgsettings["PORTAGE_USE"].split()
4088 forced_flags = set()
4089 forced_flags.update(pkgsettings.useforce)
4090 forced_flags.update(pkgsettings.usemask)
4092 if myeb and not usepkgonly and not useoldpkg:
4093 cur_iuse = myeb.iuse.all
4094 reinstall_for_flags = self._reinstall_for_flags(pkg,
4095 forced_flags, old_use, iuses, now_use, cur_iuse)
4096 if reinstall_for_flags:
4097 if not pkg.installed:
4098 self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
4100 # Compare current config to installed package
4101 # and do not reinstall if possible.
4102 if not installed and not useoldpkg and \
4103 ("--newuse" in self._frozen_config.myopts or \
4104 "--reinstall" in self._frozen_config.myopts) and \
4105 cpv in vardb.match(atom):
4106 forced_flags = set()
4107 forced_flags.update(pkg.use.force)
4108 forced_flags.update(pkg.use.mask)
4109 inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
4110 old_use = inst_pkg.use.enabled
4111 old_iuse = inst_pkg.iuse.all
4112 cur_use = self._pkg_use_enabled(pkg)
4113 cur_iuse = pkg.iuse.all
4114 reinstall_for_flags = \
4115 self._reinstall_for_flags(pkg,
4116 forced_flags, old_use, old_iuse,
4118 if reinstall_for_flags:
4120 if reinstall_atoms.findAtomForPackage(pkg, \
4121 modified_use=self._pkg_use_enabled(pkg)):
4126 matched_oldpkg.append(pkg)
4127 matched_packages.append(pkg)
4128 if reinstall_for_flags:
4129 self._dynamic_config._reinstall_nodes[pkg] = \
4133 if not matched_packages:
4136 if "--debug" in self._frozen_config.myopts:
4137 for pkg in matched_packages:
4138 portage.writemsg("%s %s%s%s\n" % \
4139 ((pkg.type_name + ":").rjust(10),
4140 pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
4142 # Filter out any old-style virtual matches if they are
4143 # mixed with new-style virtual matches.
4145 if len(matched_packages) > 1 and \
4146 "virtual" == portage.catsplit(cp)[0]:
4147 for pkg in matched_packages:
4150 # Got a new-style virtual, so filter
4151 # out any old-style virtuals.
4152 matched_packages = [pkg for pkg in matched_packages \
4156 if existing_node is not None and \
4157 existing_node in matched_packages:
4158 return existing_node, existing_node
4160 if len(matched_packages) > 1:
4161 if rebuilt_binaries:
4165 for pkg in matched_packages:
4171 if unbuilt_pkg is None or pkg > unbuilt_pkg:
4173 if built_pkg is not None and inst_pkg is not None:
4174 # Only reinstall if binary package BUILD_TIME is
4175 # non-empty, in order to avoid cases like to
4176 # bug #306659 where BUILD_TIME fields are missing
4177 # in local and/or remote Packages file.
4179 built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
4180 except (KeyError, ValueError):
4184 installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
4185 except (KeyError, ValueError):
4186 installed_timestamp = 0
4188 if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
4190 elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
4191 minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
4192 if built_timestamp and \
4193 built_timestamp > installed_timestamp and \
4194 built_timestamp >= minimal_timestamp:
4195 return built_pkg, existing_node
4197 #Don't care if the binary has an older BUILD_TIME than the installed
4198 #package. This is for closely tracking a binhost.
4199 #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
4201 if built_timestamp and \
4202 built_timestamp != installed_timestamp:
4203 return built_pkg, existing_node
4205 for pkg in matched_packages:
4206 if pkg.installed and pkg.invalid:
4207 matched_packages = [x for x in \
4208 matched_packages if x is not pkg]
4211 for pkg in matched_packages:
4212 if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
4213 return pkg, existing_node
4215 visible_matches = []
4217 visible_matches = [pkg.cpv for pkg in matched_oldpkg \
4218 if self._pkg_visibility_check(pkg, autounmask_level)]
4219 if not visible_matches:
4220 visible_matches = [pkg.cpv for pkg in matched_packages \
4221 if self._pkg_visibility_check(pkg, autounmask_level)]
4223 bestmatch = portage.best(visible_matches)
4225 # all are masked, so ignore visibility
4226 bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
4227 matched_packages = [pkg for pkg in matched_packages \
4228 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
4230 # ordered by type preference ("ebuild" type is the last resort)
4231 return matched_packages[-1], existing_node
# Select a package for `atom` that is already represented in the dependency
# graph of `root` (used when the depgraph is in "complete" mode and must not
# introduce new package choices). Returns a (pkg, in_graph) pair like the
# other _select_pkg_* methods, where in_graph is the package currently
# occupying the same slot in the graph, if any.
# NOTE(review): this excerpt omits some original lines (docstring delimiters
# and, presumably, an empty-`matches` early return between the match_pkgs
# call and the `matches[-1]` access) — confirm against the full source.
4233 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4235 Select packages that have already been added to the graph or
4236 those that are installed and have not been scheduled for
# The "graph trees" dbapi reports only packages already pulled into the
# graph, which is what restricts the selection here.
4239 graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
4240 matches = graph_db.match_pkgs(atom)
# Matches are ordered ascending, so the last entry is the highest version.
4243 pkg = matches[-1] # highest match
4244 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4245 return pkg, in_graph
# Select an installed package matching `atom` on `root`; installed-only
# selection is used for removal ("remove") operations (see _complete_graph,
# which swaps this in as self._select_package). Returns a (pkg, in_graph)
# pair like the other _select_pkg_* methods.
# NOTE(review): this excerpt omits some original lines (docstring
# delimiters, an empty-`matches` early return, and parts of the multi-match
# disambiguation branches) — confirm against the full source.
4247 def _select_pkg_from_installed(self, root, atom, onlydeps=False):
4249 Select packages that are installed.
4251 matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
# When several installed versions match, try to disambiguate by
# visibility: prefer the choice only when exactly one candidate is
# unmasked.
4255 if len(matches) > 1:
4256 matches.reverse() # ascending order
4257 unmasked = [pkg for pkg in matches if \
4258 self._pkg_visibility_check(pkg)]
4260 if len(unmasked) == 1:
4263 # Account for packages with masks (like KEYWORDS masks)
4264 # that are usually ignored in visibility checks for
4265 # installed packages, in order to handle cases like
4267 unmasked = [pkg for pkg in matches if not pkg.masks]
# Matches are in ascending order here, so take the highest version.
4270 pkg = matches[-1] # highest match
4271 in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
4272 return pkg, in_graph
# NOTE(review): this excerpt is missing a number of original lines (early
# returns, else-branches, and parts of multi-line statements) — confirm any
# control-flow reading against the full source.
4274 def _complete_graph(self, required_sets=None):
4276 Add any deep dependencies of required sets (args, system, world) that
4277 have not been pulled into the graph yet. This ensures that the graph
4278 is consistent such that initially satisfied deep dependencies are not
4279 broken in the new graph. Initially unsatisfied dependencies are
4280 irrelevant since we only want to avoid breaking dependencies that are
4281 initially satisfied.
4283 Since this method can consume enough time to disturb users, it is
4284 currently only enabled by the --complete-graph option.
4286 @param required_sets: contains required sets (currently only used
4287 for depclean and prune removal operations)
4288 @type required_sets: dict
# Nothing to do when packages are only being built (--buildpkgonly) or
# when dependency recursion is disabled.
4290 if "--buildpkgonly" in self._frozen_config.myopts or \
4291 "recurse" not in self._dynamic_config.myparams:
# Auto-enable "complete" mode when any graph node changes the version of
# an installed package in the same slot (complete_if_new_ver, default on).
4294 if "complete" not in self._dynamic_config.myparams and \
4295 self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
4296 # Enable complete mode if an installed package version will change.
4297 version_change = False
4298 for node in self._dynamic_config.digraph:
4299 if not isinstance(node, Package) or \
4300 node.operation != "merge":
4302 vardb = self._frozen_config.roots[
4303 node.root].trees["vartree"].dbapi
4304 inst_pkg = vardb.match_pkgs(node.slot_atom)
# Either an upgrade or a downgrade relative to the installed slot
# counts as a version change.
4305 if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
4306 version_change = True
4310 self._dynamic_config.myparams["complete"] = True
4312 if "complete" not in self._dynamic_config.myparams:
4317 # Put the depgraph into a mode that causes it to only
4318 # select packages that have already been added to the
4319 # graph or those that are installed and have not been
4320 # scheduled for replacement. Also, toggle the "deep"
4321 # parameter so that all dependencies are traversed and
4323 self._select_atoms = self._select_atoms_from_graph
# Removal operations may only choose installed packages; everything else
# is restricted to packages already in the graph.
4324 if "remove" in self._dynamic_config.myparams:
4325 self._select_package = self._select_pkg_from_installed
4327 self._select_package = self._select_pkg_from_graph
4328 self._dynamic_config._traverse_ignored_deps = True
4329 already_deep = self._dynamic_config.myparams.get("deep") is True
4330 if not already_deep:
4331 self._dynamic_config.myparams["deep"] = True
4333 # Invalidate the package selection cache, since
4334 # _select_package has just changed implementations.
4335 for trees in self._dynamic_config._filtered_trees.values():
4336 trees["porttree"].dbapi._clear_cache()
# Re-inject the required sets (system/world/etc.) for each root so their
# deep dependencies get traversed.
4338 args = self._dynamic_config._initial_arg_list[:]
4339 for root in self._frozen_config.roots:
4340 if root != self._frozen_config.target_root and \
4341 ("remove" in self._dynamic_config.myparams or
4342 self._frozen_config.myopts.get("--root-deps") is not None):
4343 # Only pull in deps for the relevant root.
4345 depgraph_sets = self._dynamic_config.sets[root]
4346 required_set_names = self._frozen_config._required_set_names.copy()
4347 remaining_args = required_set_names.copy()
4348 if required_sets is None or root not in required_sets:
4351 # Removal actions may override sets with temporary
4352 # replacements that have had atoms removed in order
4353 # to implement --deselect behavior.
4354 required_set_names = set(required_sets[root])
4355 depgraph_sets.sets.clear()
4356 depgraph_sets.sets.update(required_sets[root])
4357 if "remove" not in self._dynamic_config.myparams and \
4358 root == self._frozen_config.target_root and \
4360 remaining_args.difference_update(depgraph_sets.sets)
# Skip roots with nothing left to process.
4361 if not remaining_args and \
4362 not self._dynamic_config._ignored_deps and \
4363 not self._dynamic_config._dep_stack:
4365 root_config = self._frozen_config.roots[root]
4366 for s in required_set_names:
4367 pset = depgraph_sets.sets.get(s)
4369 pset = root_config.sets[s]
4370 atom = SETPREFIX + s
4371 args.append(SetArg(arg=atom, pset=pset,
4372 root_config=root_config))
# Push every atom of every (expanded) set arg onto the dep stack so the
# graph traversal below picks them up.
4374 self._set_args(args)
4375 for arg in self._expand_set_args(args, add_to_digraph=True):
4376 for atom in arg.pset.getAtoms():
4377 self._dynamic_config._dep_stack.append(
4378 Dependency(atom=atom, root=arg.root_config.root,
# Previously ignored deps must now be traversed as well.
4382 if self._dynamic_config._ignored_deps:
4383 self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
4384 self._dynamic_config._ignored_deps = []
4385 if not self._create_graph(allow_unsatisfied=True):
4387 # Check the unsatisfied deps to see if any initially satisfied deps
4388 # will become unsatisfied due to an upgrade. Initially unsatisfied
4389 # deps are irrelevant since we only want to avoid breaking deps
4390 # that are initially satisfied.
4391 while self._dynamic_config._unsatisfied_deps:
4392 dep = self._dynamic_config._unsatisfied_deps.pop()
4393 vardb = self._frozen_config.roots[
4394 dep.root].trees["vartree"].dbapi
4395 matches = vardb.match_pkgs(dep.atom)
4397 self._dynamic_config._initially_unsatisfied_deps.append(dep)
4399 # An scheduled installation broke a deep dependency.
4400 # Add the installed package to the graph so that it
4401 # will be appropriately reported as a slot collision
4402 # (possibly solvable via backtracking).
4403 pkg = matches[-1] # highest match
4404 if not self._add_pkg(pkg, dep):
4406 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): this excerpt is missing several original lines (docstring
# delimiters, the cache-hit early return, and the try/except around
# aux_get) — confirm against the full source.
4410 def _pkg(self, cpv, type_name, root_config, installed=False,
4411 onlydeps=False, myrepo = None):
4413 Get a package instance from the cache, or create a new
4414 one if necessary. Raises PackageNotFound from aux_get if it
4415 failures for some reason (package does not exist or is
4419 # Ensure that we use the specially optimized RootConfig instance
4420 # that refers to FakeVartree instead of the real vartree.
4421 root_config = self._frozen_config.roots[root_config.root]
# First try the frozen-config package cache, keyed by the full identity
# (cpv, type, repo, root, installed, onlydeps).
4422 pkg = self._frozen_config._pkg_cache.get(
4423 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4424 repo_name=myrepo, root_config=root_config,
4425 installed=installed, onlydeps=onlydeps))
4426 if pkg is None and onlydeps and not installed:
4427 # Maybe it already got pulled in as a "merge" node.
4428 pkg = self._dynamic_config.mydbapi[root_config.root].get(
4429 Package._gen_hash_key(cpv=cpv, type_name=type_name,
4430 repo_name=myrepo, root_config=root_config,
4431 installed=installed, onlydeps=False))
# Cache miss: fetch metadata from the appropriate tree and build a new
# Package instance.
4434 tree_type = self.pkg_tree_map[type_name]
4435 db = root_config.trees[tree_type].dbapi
# Use the original (non-filtered) trees for the aux-cache key list.
4436 db_keys = list(self._frozen_config._trees_orig[root_config.root][
4437 tree_type].dbapi._aux_cache_keys)
4440 metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
4442 raise portage.exception.PackageNotFound(cpv)
# Anything that is not an ebuild (binary/installed) counts as built.
4444 pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
4445 installed=installed, metadata=metadata, onlydeps=onlydeps,
4446 root_config=root_config, type_name=type_name)
4448 self._frozen_config._pkg_cache[pkg] = pkg
# Track the highest package per slot that is masked solely by LICENSE,
# presumably so a helpful ACCEPT_LICENSE hint can be shown later —
# TODO confirm the consumer of _highest_license_masked.
4450 if not self._pkg_visibility_check(pkg) and \
4451 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
4452 slot_key = (pkg.root, pkg.slot_atom)
4453 other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
4454 if other_pkg is None or pkg > other_pkg:
4455 self._frozen_config._highest_license_masked[slot_key] = pkg
# NOTE(review): this excerpt is missing many original lines (early returns,
# else-branches, try/except scaffolding, `continue`/`break` statements) —
# confirm any control-flow reading against the full source. The logic below
# is too order-sensitive to restate; only commentary has been added.
4459 def _validate_blockers(self):
4460 """Remove any blockers from the digraph that do not match any of the
4461 packages within the graph. If necessary, create hard deps to ensure
4462 correct merge order such that mutually blocking packages are never
4463 installed simultaneously. Also add runtime blockers from all installed
4464 packages if any of them haven't been added already (bug 128809)."""
# Nothing to validate when no real merge ordering is required.
4466 if "--buildpkgonly" in self._frozen_config.myopts or \
4467 "--nodeps" in self._frozen_config.myopts:
4471 # Pull in blockers from all installed packages that haven't already
4472 # been pulled into the depgraph, in order to ensure that they are
4473 # respected (bug 128809). Due to the performance penalty that is
4474 # incurred by all the additional dep_check calls that are required,
4475 # blockers returned from dep_check are cached on disk by the
4476 # BlockerCache class.
4478 # For installed packages, always ignore blockers from DEPEND since
4479 # only runtime dependencies should be relevant for packages that
4480 # are already built.
4481 dep_keys = ["RDEPEND", "PDEPEND"]
# Phase 1: collect blocker atoms from every installed package on every
# relevant root, using the on-disk BlockerCache where possible.
4482 for myroot in self._frozen_config.trees:
4484 if self._frozen_config.myopts.get("--root-deps") is not None and \
4485 myroot != self._frozen_config.target_root:
4488 vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
4489 pkgsettings = self._frozen_config.pkgsettings[myroot]
4490 root_config = self._frozen_config.roots[myroot]
4491 final_db = self._dynamic_config.mydbapi[myroot]
4493 blocker_cache = BlockerCache(myroot, vardb)
# Entries still in stale_cache after the loop belong to packages that
# no longer exist and are purged below.
4494 stale_cache = set(blocker_cache)
4497 stale_cache.discard(cpv)
4498 pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
4500 pkg in self._dynamic_config._traversed_pkg_deps
4502 # Check for masked installed packages. Only warn about
4503 # packages that are in the graph in order to avoid warning
4504 # about those that will be automatically uninstalled during
4505 # the merge process or by --depclean. Always warn about
4506 # packages masked by license, since the user likely wants
4507 # to adjust ACCEPT_LICENSE.
4509 if not self._pkg_visibility_check(pkg,
4510 trust_graph=False) and \
4511 (pkg_in_graph or 'LICENSE' in pkg.masks):
4512 self._dynamic_config._masked_installed.add(pkg)
4514 self._check_masks(pkg)
4516 blocker_atoms = None
# Gather any blockers already discovered during graph traversal,
# both relevant and irrelevant ones.
4522 self._dynamic_config._blocker_parents.child_nodes(pkg))
4527 self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
4531 # Select just the runtime blockers.
4532 blockers = [blocker for blocker in blockers \
4533 if blocker.priority.runtime or \
4534 blocker.priority.runtime_post]
4535 if blockers is not None:
4536 blockers = set(blocker.atom for blocker in blockers)
4538 # If this node has any blockers, create a "nomerge"
4539 # node for it so that they can be enforced.
4540 self._spinner_update()
4541 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cache entry is for a different build
# of the same cpv, so it must be discarded.
4542 if blocker_data is not None and \
4543 blocker_data.counter != long(pkg.metadata["COUNTER"]):
4546 # If blocker data from the graph is available, use
4547 # it to validate the cache and update the cache if
4549 if blocker_data is not None and \
4550 blockers is not None:
4551 if not blockers.symmetric_difference(
4552 blocker_data.atoms):
4556 if blocker_data is None and \
4557 blockers is not None:
4558 # Re-use the blockers from the graph.
4559 blocker_atoms = sorted(blockers)
4560 counter = long(pkg.metadata["COUNTER"])
4562 blocker_cache.BlockerData(counter, blocker_atoms)
4563 blocker_cache[pkg.cpv] = blocker_data
4567 blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
4569 # Use aux_get() to trigger FakeVartree global
4570 # updates on *DEPEND when appropriate.
4571 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4572 # It is crucial to pass in final_db here in order to
4573 # optimize dep_check calls by eliminating atoms via
4574 # dep_wordreduce and dep_eval calls.
4576 success, atoms = portage.dep_check(depstr,
4577 final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
4578 trees=self._dynamic_config._graph_trees, myroot=myroot)
4581 except Exception as e:
4582 # This is helpful, for example, if a ValueError
4583 # is thrown from cpv_expand due to multiple
4584 # matches (this can happen if an atom lacks a
4586 show_invalid_depstring_notice(
4587 pkg, depstr, _unicode_decode("%s") % (e,))
# An invalid dep string is tolerated when the package is being
# replaced anyway.
4591 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4592 if replacement_pkg and \
4593 replacement_pkg[0].operation == "merge":
4594 # This package is being replaced anyway, so
4595 # ignore invalid dependencies so as not to
4596 # annoy the user too much (otherwise they'd be
4597 # forced to manually unmerge it first).
4599 show_invalid_depstring_notice(pkg, depstr, atoms)
4601 blocker_atoms = [myatom for myatom in atoms \
4603 blocker_atoms.sort()
4604 counter = long(pkg.metadata["COUNTER"])
4605 blocker_cache[cpv] = \
4606 blocker_cache.BlockerData(counter, blocker_atoms)
# Register each discovered blocker atom as a runtime Blocker node
# parented by this installed package.
4609 for atom in blocker_atoms:
4610 blocker = Blocker(atom=atom,
4611 eapi=pkg.metadata["EAPI"],
4612 priority=self._priority(runtime=True),
4614 self._dynamic_config._blocker_parents.add(blocker, pkg)
4615 except portage.exception.InvalidAtom as e:
4616 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4617 show_invalid_depstring_notice(
4619 _unicode_decode("Invalid Atom: %s") % (e,))
# Drop cache entries for packages that no longer exist, then persist.
4621 for cpv in stale_cache:
4622 del blocker_cache[cpv]
4623 blocker_cache.flush()
4626 # Discard any "uninstall" tasks scheduled by previous calls
4627 # to this method, since those tasks may not make sense given
4628 # the current graph state.
4629 previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
4630 if previous_uninstall_tasks:
4631 self._dynamic_config._blocker_uninstalls = digraph()
4632 self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (post-merge) package databases.
4634 for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
4635 self._spinner_update()
4636 root_config = self._frozen_config.roots[blocker.root]
4637 virtuals = root_config.settings.getvirtuals()
4638 myroot = blocker.root
4639 initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
4640 final_db = self._dynamic_config.mydbapi[myroot]
# Old-style virtuals must be expanded to their providers, unless a
# new-style virtual with the same cp exists.
4642 provider_virtual = False
4643 if blocker.cp in virtuals and \
4644 not self._have_new_virt(blocker.root, blocker.cp):
4645 provider_virtual = True
4647 # Use this to check PROVIDE for each matched package
4649 atom_set = InternalPackageSet(
4650 initial_atoms=[blocker.atom])
4652 if provider_virtual:
4654 for provider_entry in virtuals[blocker.cp]:
4655 atoms.append(Atom(blocker.atom.replace(
4656 blocker.cp, provider_entry.cp, 1)))
4658 atoms = [blocker.atom]
# Packages blocked in the current (installed) state.
4660 blocked_initial = set()
4662 for pkg in initial_db.match_pkgs(atom):
4663 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4664 blocked_initial.add(pkg)
# Packages blocked in the final (post-merge) state.
4666 blocked_final = set()
4668 for pkg in final_db.match_pkgs(atom):
4669 if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
4670 blocked_final.add(pkg)
# A blocker that matches nothing in either state is irrelevant.
4672 if not blocked_initial and not blocked_final:
4673 parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
4674 self._dynamic_config._blocker_parents.remove(blocker)
4675 # Discard any parents that don't have any more blockers.
4676 for pkg in parent_pkgs:
4677 self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
4678 if not self._dynamic_config._blocker_parents.child_nodes(pkg):
4679 self._dynamic_config._blocker_parents.remove(pkg)
# Phase 3: for each parent of the blocker, decide whether the block
# can be resolved by ordering (uninstall-before-merge) or is unsolvable.
4681 for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
4682 unresolved_blocks = False
4683 depends_on_order = set()
4684 for pkg in blocked_initial:
4685 if pkg.slot_atom == parent.slot_atom and \
4686 not blocker.atom.blocker.overlap.forbid:
4687 # New !!atom blockers do not allow temporary
4688 # simulaneous installation, so unlike !atom
4689 # blockers, !!atom blockers aren't ignored
4690 # when they match other packages occupying
4693 if parent.installed:
4694 # Two currently installed packages conflict with
4695 # eachother. Ignore this case since the damage
4696 # is already done and this would be likely to
4697 # confuse users if displayed like a normal blocker.
4700 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4702 if parent.operation == "merge":
4703 # Maybe the blocked package can be replaced or simply
4704 # unmerged to resolve this block.
4705 depends_on_order.add((pkg, parent))
4707 # None of the above blocker resolutions techniques apply,
4708 # so apparently this one is unresolvable.
4709 unresolved_blocks = True
4710 for pkg in blocked_final:
4711 if pkg.slot_atom == parent.slot_atom and \
4712 not blocker.atom.blocker.overlap.forbid:
4713 # New !!atom blockers do not allow temporary
4714 # simulaneous installation, so unlike !atom
4715 # blockers, !!atom blockers aren't ignored
4716 # when they match other packages occupying
4719 if parent.operation == "nomerge" and \
4720 pkg.operation == "nomerge":
4721 # This blocker will be handled the next time that a
4722 # merge of either package is triggered.
4725 self._dynamic_config._blocked_pkgs.add(pkg, blocker)
4727 # Maybe the blocking package can be
4728 # unmerged to resolve this block.
4729 if parent.operation == "merge" and pkg.installed:
4730 depends_on_order.add((pkg, parent))
4732 elif parent.operation == "nomerge":
4733 depends_on_order.add((parent, pkg))
4735 # None of the above blocker resolutions techniques apply,
4736 # so apparently this one is unresolvable.
4737 unresolved_blocks = True
4739 # Make sure we don't unmerge any package that have been pulled
4741 if not unresolved_blocks and depends_on_order:
4742 for inst_pkg, inst_task in depends_on_order:
4743 if self._dynamic_config.digraph.contains(inst_pkg) and \
4744 self._dynamic_config.digraph.parent_nodes(inst_pkg):
4745 unresolved_blocks = True
# Ordering can resolve the block: schedule uninstall tasks with hard
# dependency edges so merges happen in a safe order.
4748 if not unresolved_blocks and depends_on_order:
4749 for inst_pkg, inst_task in depends_on_order:
4750 uninst_task = Package(built=inst_pkg.built,
4751 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4752 metadata=inst_pkg.metadata,
4753 operation="uninstall",
4754 root_config=inst_pkg.root_config,
4755 type_name=inst_pkg.type_name)
4756 # Enforce correct merge order with a hard dep.
4757 self._dynamic_config.digraph.addnode(uninst_task, inst_task,
4758 priority=BlockerDepPriority.instance)
4759 # Count references to this blocker so that it can be
4760 # invalidated after nodes referencing it have been
4762 self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
4763 if not unresolved_blocks and not depends_on_order:
4764 self._dynamic_config._irrelevant_blockers.add(blocker, parent)
4765 self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
4766 if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
4767 self._dynamic_config._blocker_parents.remove(blocker)
4768 if not self._dynamic_config._blocker_parents.child_nodes(parent):
4769 self._dynamic_config._blocker_parents.remove(parent)
4770 if unresolved_blocks:
4771 self._dynamic_config._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts are tolerable for this run.
# With any of the listed options no package is actually merged to the live
# filesystem, so mutually blocking packages can safely coexist in the plan.
# NOTE(review): this excerpt omits the accumulator initialization and the
# final return — confirm against the full source.
4775 def _accept_blocker_conflicts(self):
4777 for x in ("--buildpkgonly", "--fetchonly",
4778 "--fetch-all-uri", "--nodeps"):
4779 if x in self._frozen_config.myopts:
# NOTE(review): this excerpt omits the node_info initialization and the
# comparator's return statements for several branches — confirm against the
# full source.
4784 def _merge_order_bias(self, mygraph):
4786 For optimal leaf node selection, promote deep system runtime deps and
4787 order nodes from highest to lowest overall reference count.
# Reference count = number of parents each node has in the graph.
4791 for node in mygraph.order:
4792 node_info[node] = len(mygraph.parent_nodes(node))
4793 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Three-way comparator (old cmp-style, adapted below via cmp_sort_key):
# uninstall tasks and deep system deps get priority over plain ordering
# by reference count.
4795 def cmp_merge_preference(node1, node2):
4797 if node1.operation == 'uninstall':
4798 if node2.operation == 'uninstall':
4802 if node2.operation == 'uninstall':
4803 if node1.operation == 'uninstall':
4807 node1_sys = node1 in deep_system_deps
4808 node2_sys = node2 in deep_system_deps
4809 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
4814 return node_info[node2] - node_info[node1]
# cmp_sort_key wraps the cmp-style function into a key= callable.
4816 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, computing and caching it on first use.
# NOTE(review): the `reversed` parameter shadows the builtin of the same
# name; it cannot be renamed here without breaking keyword callers. This
# excerpt also omits the try: line, the retry `continue`, and the final
# return — confirm against the full source.
4818 def altlist(self, reversed=False):
# Loop until serialization succeeds; _serialize_tasks may signal a retry
# (self._serialize_tasks_retry) after conflict resolution adjusts state.
4820 while self._dynamic_config._serialized_tasks_cache is None:
4821 self._resolve_conflicts()
4823 self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
4824 self._serialize_tasks()
4825 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached list.
4828 retlist = self._dynamic_config._serialized_tasks_cache[:]
# NOTE(review): this excerpt omits some original lines (libc_pkgs
# initialization, empty-match handling, and an early return when no libc
# package is scheduled) — confirm against the full source.
4833 def _implicit_libc_deps(self, mergelist, graph):
4835 Create implicit dependencies on libc, in order to ensure that libc
4836 is installed as early as possible (see bug #303567).
# Only the running root needs the early-libc guarantee.
4839 implicit_libc_roots = (self._frozen_config._running_root.root,)
4840 for root in implicit_libc_roots:
4841 graphdb = self._dynamic_config.mydbapi[root]
4842 vardb = self._frozen_config.trees[root]["vartree"].dbapi
# Find scheduled merges of the libc virtual's providers that are not
# yet installed.
4843 for atom in self._expand_virt_from_graph(root,
4844 portage.const.LIBC_PACKAGE_ATOM):
4847 match = graphdb.match_pkgs(atom)
4851 if pkg.operation == "merge" and \
4852 not vardb.cpv_exists(pkg.cpv):
4853 libc_pkgs.setdefault(pkg.root, set()).add(pkg)
# Walk the merge list in order; once a libc package has been seen, no
# edge is needed for packages merged after it.
4858 earlier_libc_pkgs = set()
4860 for pkg in mergelist:
4861 if not isinstance(pkg, Package):
4862 # a satisfied blocker
4864 root_libc_pkgs = libc_pkgs.get(pkg.root)
4865 if root_libc_pkgs is not None and \
4866 pkg.operation == "merge":
4867 if pkg in root_libc_pkgs:
4868 earlier_libc_pkgs.add(pkg)
# Packages merged before libc get a buildtime dep edge on it so
# the scheduler pulls libc forward.
4870 for libc_pkg in root_libc_pkgs:
4871 if libc_pkg in earlier_libc_pkgs:
4872 graph.add(libc_pkg, pkg,
4873 priority=DepPriority(buildtime=True))
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in its parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.
	"""
	# NOTE: altlist initializes self._dynamic_config._scheduler_graph
	mergelist = self.altlist()
	self._implicit_libc_deps(mergelist,
		self._dynamic_config._scheduler_graph)

	# Break DepPriority.satisfied attributes which reference
	# installed Package instances.
	for parents, children, node in \
		self._dynamic_config._scheduler_graph.nodes.values():
		for priorities in chain(parents.values(), children.values()):
			for priority in priorities:
				if priority.satisfied:
					# Collapse the (truthy) Package reference into a
					# plain boolean so it no longer pins the Package.
					priority.satisfied = True

	pkg_cache = self._frozen_config._pkg_cache
	graph = self._dynamic_config._scheduler_graph
	trees = self._frozen_config.trees
	pruned_pkg_cache = {}
	# Keep only packages that are part of the scheduler graph or that
	# are still present in the installed-package database.
	for key, pkg in pkg_cache.items():
		if pkg in graph or \
			(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
			pruned_pkg_cache[key] = pkg

		# Share the pruned cache with each root's vartree.
		trees[root]['vartree']._pkg_cache = pruned_pkg_cache

	# Build the scheduler graph configuration from the pruned state.
	_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
def break_refs(self):
	"""
	Break any references in Package instances that lead back to the depgraph.
	This is useful if you want to hold references to packages without also
	holding the depgraph on the heap. It should only be called after the
	depgraph and _frozen_config will not be used for any more calculations.
	"""
	for root_config in self._frozen_config.roots.values():
		# Copy state from the original root_config kept in _trees_orig.
		root_config.update(self._frozen_config._trees_orig[
			root_config.root]["root_config"])
		# Both instances are now identical, so discard the
		# original which should have no other references.
		self._frozen_config._trees_orig[
			root_config.root]["root_config"] = root_config
def _resolve_conflicts(self):
	"""
	Complete the graph, validate blockers, and process any slot
	conflicts, raising an internal error if either step fails.
	"""
	if not self._complete_graph():
		raise self._unknown_internal_error()

	if not self._validate_blockers():
		# Blocker validation failed; restarting the resolver
		# would not help, so flag that before aborting.
		self._dynamic_config._skip_restart = True
		raise self._unknown_internal_error()

	if self._dynamic_config._slot_collision_info:
		self._process_slot_conflicts()
def _serialize_tasks(self):
	"""
	Serialize the dependency digraph into an ordered task list.

	Returns a (retlist, scheduler_graph) tuple: retlist is the ordered
	list of merge/uninstall tasks (with Blocker instances appended in
	some cases) and scheduler_graph is a copy of the digraph with
	uninstall edges adjusted for the Scheduler.

	Raises self._serialize_tasks_retry when the graph must first be
	re-completed (uninstall tasks present), or
	self._unknown_internal_error for unresolvable circular
	dependencies or unacceptable blocker/slot conflicts.

	NOTE(review): several statements (loop headers, returns,
	initializers such as retlist/asap_nodes/node lists) are not
	visible in this excerpt; indentation below is reconstructed and
	should be verified against the full file.
	"""
	debug = "--debug" in self._frozen_config.myopts
		# Dump the full digraph before serialization when debugging.
		writemsg("\ndigraph:\n\n", noiselevel=-1)
		self._dynamic_config.digraph.debug_print()
		writemsg("\n", noiselevel=-1)

	scheduler_graph = self._dynamic_config.digraph.copy()

	if '--nodeps' in self._frozen_config.myopts:
		# Preserve the package order given on the command line.
		return ([node for node in scheduler_graph \
			if isinstance(node, Package) \
			and node.operation == 'merge'], scheduler_graph)

	# Work on a private copy so the cached digraph stays intact.
	mygraph=self._dynamic_config.digraph.copy()

	removed_nodes = set()

	# Prune off all DependencyArg instances since they aren't
	# needed, and because of nested sets this is faster than doing
	# it with multiple digraph.root_nodes() calls below. This also
	# takes care of nested sets that have circular references,
	# which wouldn't be matched by digraph.root_nodes().
	for node in mygraph:
		if isinstance(node, DependencyArg):
			removed_nodes.add(node)
	mygraph.difference_update(removed_nodes)
	removed_nodes.clear()

	# Prune "nomerge" root nodes if nothing depends on them, since
	# otherwise they slow down merge order calculation. Don't remove
	# non-root nodes since they help optimize merge order in some cases
	# such as revdep-rebuild.
	for node in mygraph.root_nodes():
		if not isinstance(node, Package) or \
			node.installed or node.onlydeps:
			removed_nodes.add(node)
	self._spinner_update()
	mygraph.difference_update(removed_nodes)
	if not removed_nodes:
	removed_nodes.clear()

	self._merge_order_bias(mygraph)

	def cmp_circular_bias(n1, n2):
		"""
		RDEPEND is stronger than PDEPEND and this function
		measures such a strength bias within a circular
		dependency relationship.
		"""
		n1_n2_medium = n2 in mygraph.child_nodes(n1,
			ignore_priority=priority_range.ignore_medium_soft)
		n2_n1_medium = n1 in mygraph.child_nodes(n2,
			ignore_priority=priority_range.ignore_medium_soft)
		if n1_n2_medium == n2_n1_medium:

	myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
	# Contains uninstall tasks that have been scheduled to
	# occur after overlapping blockers have been installed.
	scheduled_uninstalls = set()
	# Contains any Uninstall tasks that have been ignored
	# in order to avoid the circular deps code path. These
	# correspond to blocker conflicts that could not be
	# resolved.
	ignored_uninstall_tasks = set()
	have_uninstall_task = False
	complete = "complete" in self._dynamic_config.myparams

	def get_nodes(**kwargs):
		"""
		Returns leaf nodes excluding Uninstall instances
		since those should be executed as late as possible.
		"""
		return [node for node in mygraph.leaf_nodes(**kwargs) \
			if isinstance(node, Package) and \
				(node.operation != "uninstall" or \
				node in scheduled_uninstalls)]

	# sys-apps/portage needs special treatment if ROOT="/"
	running_root = self._frozen_config._running_root.root
	runtime_deps = InternalPackageSet(
		initial_atoms=[PORTAGE_PACKAGE_ATOM])
	running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
		PORTAGE_PACKAGE_ATOM)
	replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
		PORTAGE_PACKAGE_ATOM)

	# Reduce the match lists to single packages (or None).
		running_portage = running_portage[0]
		running_portage = None

	if replacement_portage:
		replacement_portage = replacement_portage[0]
		replacement_portage = None

	if replacement_portage == running_portage:
		# No portage upgrade/replacement is scheduled.
		replacement_portage = None

	if running_portage is not None:
		# Collect portage's own runtime deps so they can be protected
		# from uninstallation below.
			portage_rdepend = self._select_atoms_highest_available(
				running_root, running_portage.metadata["RDEPEND"],
				myuse=self._pkg_use_enabled(running_portage),
				parent=running_portage, strict=False)
		except portage.exception.InvalidDependString as e:
			portage.writemsg("!!! Invalid RDEPEND in " + \
				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
				(running_root, running_portage.cpv, e), noiselevel=-1)
			portage_rdepend = {running_portage : []}
		for atoms in portage_rdepend.values():
			runtime_deps.update(atom for atom in atoms \
				if not atom.blocker)

	# Merge libc asap, in order to account for implicit
	# dependencies. See bug #303567.
	implicit_libc_roots = (running_root,)
	for root in implicit_libc_roots:
		vardb = self._frozen_config.trees[root]["vartree"].dbapi
		graphdb = self._dynamic_config.mydbapi[root]
		for atom in self._expand_virt_from_graph(root,
			portage.const.LIBC_PACKAGE_ATOM):
			match = graphdb.match_pkgs(atom)
			# Only new libc merges (not yet installed) qualify.
			if pkg.operation == "merge" and \
				not vardb.cpv_exists(pkg.cpv):

		# If there's also an os-headers upgrade, we need to
		# pull that in first. See bug #328317.
		for atom in self._expand_virt_from_graph(root,
			portage.const.OS_HEADERS_PACKAGE_ATOM):
			match = graphdb.match_pkgs(atom)
			if pkg.operation == "merge" and \
				not vardb.cpv_exists(pkg.cpv):
				asap_nodes.append(pkg)

		asap_nodes.extend(libc_pkgs)

	def gather_deps(ignore_priority, mergeable_nodes,
		selected_nodes, node):
		"""
		Recursively gather a group of nodes that RDEPEND on
		each other. This ensures that they are merged as a group
		and get their RDEPENDs satisfied as soon as possible.
		"""
		if node in selected_nodes:
		if node not in mergeable_nodes:
		if node == replacement_portage and \
			mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
			# Make sure that portage always has all of its
			# RDEPENDs installed first.
		selected_nodes.add(node)
		for child in mygraph.child_nodes(node,
			ignore_priority=ignore_priority):
			if not gather_deps(ignore_priority,
				mergeable_nodes, selected_nodes, child):

	def ignore_uninst_or_med(priority):
		# Like priority_range.ignore_medium, but also ignores
		# blocker-dependency edges.
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium(priority)

	def ignore_uninst_or_med_soft(priority):
		# Like priority_range.ignore_medium_soft, but also ignores
		# blocker-dependency edges.
		if priority is BlockerDepPriority.instance:
		return priority_range.ignore_medium_soft(priority)

	tree_mode = "--tree" in self._frozen_config.myopts
	# Tracks whether or not the current iteration should prefer asap_nodes
	# if available. This is set to False when the previous iteration
	# failed to select any nodes. It is reset whenever nodes are
	# successfully selected.
	# Controls whether or not the current iteration should drop edges that
	# are "satisfied" by installed packages, in order to solve circular
	# dependencies. The deep runtime dependencies of installed packages are
	# not checked in this case (bug #199856), so it must be avoided
	# whenever possible.
	drop_satisfied = False

	# State of variables for successive iterations that loosen the
	# criteria for node selection.
	#
	# iteration   prefer_asap   drop_satisfied

	# If no nodes are selected on the last iteration, it is due to
	# unresolved blockers or circular dependencies.

		self._spinner_update()
		selected_nodes = None
		ignore_priority = None
		if drop_satisfied or (prefer_asap and asap_nodes):
			priority_range = DepPrioritySatisfiedRange
			priority_range = DepPriorityNormalRange
		if prefer_asap and asap_nodes:
			# ASAP nodes are merged before their soft deps. Go ahead and
			# select root nodes here if necessary, since it's typical for
			# the parent to have been removed from the graph already.
			asap_nodes = [node for node in asap_nodes \
				if mygraph.contains(node)]
			for i in range(priority_range.SOFT,
				priority_range.MEDIUM_SOFT + 1):
				ignore_priority = priority_range.ignore_priority[i]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=ignore_priority):
						selected_nodes = [node]
						asap_nodes.remove(node)

		if not selected_nodes and \
			not (prefer_asap and asap_nodes):
			for i in range(priority_range.NONE,
				priority_range.MEDIUM_SOFT + 1):
				ignore_priority = priority_range.ignore_priority[i]
				nodes = get_nodes(ignore_priority=ignore_priority)
					# If there is a mixture of merges and uninstalls,
					# do the uninstalls first.
					good_uninstalls = None
						good_uninstalls = []
							if node.operation == "uninstall":
								good_uninstalls.append(node)
							nodes = good_uninstalls

					if good_uninstalls or len(nodes) == 1 or \
						(ignore_priority is None and \
						not asap_nodes and not tree_mode):
						# Greedily pop all of these nodes since no
						# relationship has been ignored. This optimization
						# destroys --tree output, so it's disabled in tree
						# mode.
						selected_nodes = nodes
						# For optimal merge order:
						#  * Only pop one node.
						#  * Removing a root node (node without a parent)
						#    will not produce a leaf node, so avoid it.
						#  * It's normal for a selected uninstall to be a
						#    root node, so don't check them for parents.
							prefer_asap_parents = (True, False)
							prefer_asap_parents = (False,)
						for check_asap_parent in prefer_asap_parents:
							if check_asap_parent:
									parents = mygraph.parent_nodes(node,
										ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
									if any(x in asap_nodes for x in parents):
										selected_nodes = [node]
									if mygraph.parent_nodes(node):
										selected_nodes = [node]

		if not selected_nodes:
			nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
				mergeable_nodes = set(nodes)
				if prefer_asap and asap_nodes:
				# When gathering the nodes belonging to a runtime cycle,
				# we want to minimize the number of nodes gathered, since
				# this tends to produce a more optimal merge order.
				# Ignoring all medium_soft deps serves this purpose.
				# In the case of multiple runtime cycles, where some cycles
				# may depend on smaller independent cycles, it's optimal
				# to merge smaller independent cycles before other cycles
				# that depend on them. Therefore, we search for the
				# smallest cycle in order to try and identify and prefer
				# these smaller independent cycles.
				ignore_priority = priority_range.ignore_medium_soft
				smallest_cycle = None
					if not mygraph.parent_nodes(node):
					selected_nodes = set()
					if gather_deps(ignore_priority,
						mergeable_nodes, selected_nodes, node):
						# When selecting asap_nodes, we need to ensure
						# that we haven't selected a large runtime cycle
						# that is obviously sub-optimal. This will be
						# obvious if any of the non-asap selected_nodes
						# is a leaf node when medium_soft deps are
						# ignored.
						if prefer_asap and asap_nodes and \
							len(selected_nodes) > 1:
							for node in selected_nodes.difference(
								if not mygraph.child_nodes(node,
									DepPriorityNormalRange.ignore_medium_soft):
									selected_nodes = None
							if smallest_cycle is None or \
								len(selected_nodes) < len(smallest_cycle):
								smallest_cycle = selected_nodes

				selected_nodes = smallest_cycle

			if selected_nodes and debug:
				# Dump just the selected cycle for debugging.
				writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
					(len(selected_nodes),), noiselevel=-1)
				cycle_digraph = mygraph.copy()
				cycle_digraph.difference_update([x for x in
					cycle_digraph if x not in selected_nodes])
				cycle_digraph.debug_print()
				writemsg("\n", noiselevel=-1)

			if prefer_asap and asap_nodes and not selected_nodes:
				# We failed to find any asap nodes to merge, so ignore
				# them for the next iteration.

		if selected_nodes and ignore_priority is not None:
			# Try to merge ignored medium_soft deps as soon as possible
			# if they're not satisfied by installed packages.
			for node in selected_nodes:
				children = set(mygraph.child_nodes(node))
				soft = children.difference(
					mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
				medium_soft = children.difference(
					mygraph.child_nodes(node,
						DepPrioritySatisfiedRange.ignore_medium_soft))
				medium_soft.difference_update(soft)
				for child in medium_soft:
					if child in selected_nodes:
					if child in asap_nodes:
					# Merge PDEPEND asap for bug #180045.
					asap_nodes.append(child)

		if selected_nodes and len(selected_nodes) > 1:
			if not isinstance(selected_nodes, list):
				selected_nodes = list(selected_nodes)
			selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

		if not selected_nodes and myblocker_uninstalls:
			# An Uninstall task needs to be executed in order to
			# avoid conflict if possible.
				priority_range = DepPrioritySatisfiedRange
				priority_range = DepPriorityNormalRange

			mergeable_nodes = get_nodes(
				ignore_priority=ignore_uninst_or_med)

			min_parent_deps = None
			for task in myblocker_uninstalls.leaf_nodes():
				# Do some sanity checks so that system or world packages
				# don't get uninstalled inappropriately here (only really
				# necessary when --complete-graph has not been enabled).

				if task in ignored_uninstall_tasks:
				if task in scheduled_uninstalls:
					# It's been scheduled but it hasn't
					# been executed yet due to dependence
					# on installation of blocking packages.
				root_config = self._frozen_config.roots[task.root]
				inst_pkg = self._pkg(task.cpv, "installed", root_config,
				if self._dynamic_config.digraph.contains(inst_pkg):
				# Classify the blockers that involve this task by
				# their EAPI-defined overlap semantics.
				forbid_overlap = False
				heuristic_overlap = False
				for blocker in myblocker_uninstalls.parent_nodes(task):
					if not eapi_has_strong_blocks(blocker.eapi):
						heuristic_overlap = True
					elif blocker.atom.blocker.overlap.forbid:
						forbid_overlap = True
				if forbid_overlap and running_root == task.root:
				if heuristic_overlap and running_root == task.root:
					# Never uninstall sys-apps/portage or its essential
					# dependencies, except through replacement.
						runtime_dep_atoms = \
							list(runtime_deps.iterAtomsForPackage(task))
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

					# Don't uninstall a runtime dep if it appears
					# to be the only suitable one installed.
					vardb = root_config.trees["vartree"].dbapi
					for atom in runtime_dep_atoms:
						other_version = None
						for pkg in vardb.match_pkgs(atom):
							# Skip the task's own installed instance
							# (same cpv and COUNTER).
							if pkg.cpv == task.cpv and \
								pkg.metadata["COUNTER"] == \
								task.metadata["COUNTER"]:
						if other_version is None:

				# For packages in the system set, don't take
				# any chances. If the conflict can't be resolved
				# by a normal replacement operation then abort.
					for atom in root_config.sets[
						"system"].iterAtomsForPackage(task):
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Note that the world check isn't always
				# necessary since self._complete_graph() will
				# add all packages from the system and world sets to the
				# graph. This just allows unresolved conflicts to be
				# detected as early as possible, which makes it possible
				# to avoid calling self._complete_graph() when it is
				# unnecessary due to blockers triggering an abortion.

				# For packages in the world set, go ahead and uninstall
				# when necessary, as long as the atom will be satisfied
				# in the final state.
				graph_db = self._dynamic_config.mydbapi[task.root]
					for atom in root_config.sets[
						"selected"].iterAtomsForPackage(task):
						for pkg in graph_db.match_pkgs(atom):
							# Record the atom so the blocked world
							# package can be reported later.
							self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
					except portage.exception.InvalidDependString as e:
						portage.writemsg("!!! Invalid PROVIDE in " + \
							"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
							(task.root, task.cpv, e), noiselevel=-1)

				# Check the deps of parent nodes to ensure that
				# the chosen task produces a leaf node. Maybe
				# this can be optimized some more to make the
				# best possible choice, but the current algorithm
				# is simple and should be near optimal for most
				# common cases.
				self._spinner_update()
				mergeable_parent = False
				parent_deps.add(task)
				for parent in mygraph.parent_nodes(task):
					parent_deps.update(mygraph.child_nodes(parent,
						ignore_priority=priority_range.ignore_medium_soft))
					if min_parent_deps is not None and \
						len(parent_deps) >= min_parent_deps:
						# This task is no better than a previously selected
						# task, so abort search now in order to avoid wasting
						# any more cpu time on this task. This increases
						# performance dramatically in cases when there are
						# hundreds of blockers to solve, like when
						# upgrading to a new slot of kde-meta.
						mergeable_parent = None
					if parent in mergeable_nodes and \
						gather_deps(ignore_uninst_or_med_soft,
							mergeable_nodes, set(), parent):
						mergeable_parent = True
				if not mergeable_parent:
				if min_parent_deps is None or \
					len(parent_deps) < min_parent_deps:
					# Best candidate seen so far.
					min_parent_deps = len(parent_deps)

				if uninst_task is not None and min_parent_deps == 1:
					# This is the best possible result, so abort search
					# now in order to avoid wasting any more cpu time.

			if uninst_task is not None:
				# The uninstall is performed only after blocking
				# packages have been merged on top of it. File
				# collisions between blocking packages are detected
				# and removed from the list of files to be uninstalled.
				scheduled_uninstalls.add(uninst_task)
				parent_nodes = mygraph.parent_nodes(uninst_task)

				# Reverse the parent -> uninstall edges since we want
				# to do the uninstall after blocking packages have
				# been merged on top of it.
				mygraph.remove(uninst_task)
				for blocked_pkg in parent_nodes:
					mygraph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)
					scheduler_graph.remove_edge(uninst_task, blocked_pkg)
					scheduler_graph.add(blocked_pkg, uninst_task,
						priority=BlockerDepPriority.instance)

				# Sometimes a merge node will render an uninstall
				# node unnecessary (due to occupying the same SLOT),
				# and we want to avoid executing a separate uninstall
				# task in that case.
				slot_node = self._dynamic_config.mydbapi[uninst_task.root
					].match_pkgs(uninst_task.slot_atom)
					slot_node[0].operation == "merge":
					mygraph.add(slot_node[0], uninst_task,
						priority=BlockerDepPriority.instance)

				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			# Only select root nodes as a last resort. This case should
			# only trigger when the graph is nearly empty and the only
			# remaining nodes are isolated (no parents or children). Since
			# the nodes must be isolated, ignore_priority is not needed.
			selected_nodes = get_nodes()

		if not selected_nodes and not drop_satisfied:
			# Loosen the criteria on the next iteration.
			drop_satisfied = True

		if not selected_nodes and myblocker_uninstalls:
			# If possible, drop an uninstall task here in order to avoid
			# the circular deps code path. The corresponding blocker will
			# still be counted as an unresolved conflict.
			for node in myblocker_uninstalls.leaf_nodes():
					mygraph.remove(node)
				ignored_uninstall_tasks.add(node)

			if uninst_task is not None:
				# Reset the state variables for leaf node selection and
				# continue trying to select leaf nodes.
				drop_satisfied = False

		if not selected_nodes:
			# Nothing selectable remains: report circular deps.
			self._dynamic_config._circular_deps_for_display = mygraph
			self._dynamic_config._skip_restart = True
			raise self._unknown_internal_error()

		# At this point, we've succeeded in selecting one or more nodes, so
		# reset state variables for leaf node selection.
		drop_satisfied = False

		mygraph.difference_update(selected_nodes)

		for node in selected_nodes:
			if isinstance(node, Package) and \
				node.operation == "nomerge":

			# Handle interactions between blockers
			# and uninstallation tasks.
			solved_blockers = set()
			if isinstance(node, Package) and \
				"uninstall" == node.operation:
				have_uninstall_task = True
				vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
				inst_pkg = vardb.match_pkgs(node.slot_atom)
					# The package will be replaced by this one, so remove
					# the corresponding Uninstall task if necessary.
					inst_pkg = inst_pkg[0]
					uninst_task = Package(built=inst_pkg.built,
						cpv=inst_pkg.cpv, installed=inst_pkg.installed,
						metadata=inst_pkg.metadata,
						operation="uninstall",
						root_config=inst_pkg.root_config,
						type_name=inst_pkg.type_name)
						mygraph.remove(uninst_task)

			if uninst_task is not None and \
				uninst_task not in ignored_uninstall_tasks and \
				myblocker_uninstalls.contains(uninst_task):
				blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
				myblocker_uninstalls.remove(uninst_task)
				# Discard any blockers that this Uninstall solves.
				for blocker in blocker_nodes:
					if not myblocker_uninstalls.child_nodes(blocker):
						myblocker_uninstalls.remove(blocker)
							self._dynamic_config._unsolvable_blockers:
							solved_blockers.add(blocker)

			retlist.append(node)

			if (isinstance(node, Package) and \
				"uninstall" == node.operation) or \
				(uninst_task is not None and \
				uninst_task in scheduled_uninstalls):
				# Include satisfied blockers in the merge list
				# since the user might be interested and also
				# it serves as an indicator that blocking packages
				# will be temporarily installed simultaneously.
				for blocker in solved_blockers:
					retlist.append(blocker)

	unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
	for node in myblocker_uninstalls.root_nodes():
		unsolvable_blockers.add(node)

	# If any Uninstall tasks need to be executed in order
	# to avoid a conflict, complete the graph with any
	# dependencies that may have been initially
	# neglected (to ensure that unsafe Uninstall tasks
	# are properly identified and blocked from execution).
	if have_uninstall_task and \
		not unsolvable_blockers:
		self._dynamic_config.myparams["complete"] = True
		if '--debug' in self._frozen_config.myopts:
			msg.append("enabling 'complete' depgraph mode " + \
				"due to uninstall task(s):")
			for node in retlist:
				if isinstance(node, Package) and \
					node.operation == 'uninstall':
					msg.append("\t%s" % (node,))
			writemsg_level("\n%s\n" % \
				"".join("%s\n" % line for line in msg),
				level=logging.DEBUG, noiselevel=-1)
		raise self._serialize_tasks_retry("")

	# Set satisfied state on blockers, but not before the
	# above retry path, since we don't want to modify the
	# state in that case.
	for node in retlist:
		if isinstance(node, Blocker):
			node.satisfied = True

	for blocker in unsolvable_blockers:
		retlist.append(blocker)

	if unsolvable_blockers and \
		not self._accept_blocker_conflicts():
		# Cache state for display, then abort.
		self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
		self._dynamic_config._serialized_tasks_cache = retlist[:]
		self._dynamic_config._scheduler_graph = scheduler_graph
		self._dynamic_config._skip_restart = True
		raise self._unknown_internal_error()

	if self._dynamic_config._slot_collision_info and \
		not self._accept_blocker_conflicts():
		self._dynamic_config._serialized_tasks_cache = retlist[:]
		self._dynamic_config._scheduler_graph = scheduler_graph
		raise self._unknown_internal_error()

	return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
	"""
	Display a circular dependency error, delegating analysis to
	circular_dependency_handler and printing any suggested changes
	that could break the cycle.
	"""
	self._dynamic_config._circular_dependency_handler = \
		circular_dependency_handler(self, mygraph)
	handler = self._dynamic_config._circular_dependency_handler

	# Force verbose tree output so the cycle is visible in the
	# displayed merge list.
	self._frozen_config.myopts.pop("--quiet", None)
	self._frozen_config.myopts["--verbose"] = True
	self._frozen_config.myopts["--tree"] = True
	portage.writemsg("\n\n", noiselevel=-1)
	self.display(handler.merge_list)
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	portage.writemsg(prefix + "Error: circular dependencies:\n",
	portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is None:
		# No precomputed message: dump the raw cycle.
		handler.debug_print()
		portage.writemsg("\n", noiselevel=-1)

	if handler.circular_dep_message is not None:
		portage.writemsg(handler.circular_dep_message, noiselevel=-1)

	suggestions = handler.suggestions
		writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
		if len(suggestions) == 1:
			writemsg("by applying the following change:\n", noiselevel=-1)
			writemsg("by applying " + colorize("bold", "any of") + \
				" the following changes:\n", noiselevel=-1)
		writemsg("".join(suggestions), noiselevel=-1)
		writemsg("\nNote that this change can be reverted, once the package has" + \
			" been installed.\n", noiselevel=-1)
		if handler.large_cycle_count:
			writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
				"Several changes might be required to resolve all cycles.\n" + \
				"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)

		# No suggestions available: print the generic USE flag hint.
		writemsg("\n\n", noiselevel=-1)
		writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""
	Display the cached serialized merge list, unless that list (or
	its reverse) is exactly what was displayed last.
	"""
	if self._dynamic_config._serialized_tasks_cache is not None and \
		not (self._dynamic_config._displayed_list is not None and \
		(self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
		self._dynamic_config._displayed_list == \
		list(reversed(self._dynamic_config._serialized_tasks_cache)))):
		display_list = self._dynamic_config._serialized_tasks_cache[:]
		if "--tree" in self._frozen_config.myopts:
			# Tree mode shows the merge list in reverse order.
			display_list.reverse()
		self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
	"""
	Display an error for unsatisfied blockers, listing each
	conflicting package together with the parents that pulled it in.
	"""
	self._show_merge_list()
	msg = "Error: The above package list contains " + \
		"packages which cannot be installed " + \
		"at the same time on the same system."
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for line in textwrap.wrap(msg, 70):
		portage.writemsg(prefix + line + "\n", noiselevel=-1)

	# Display the conflicting packages along with the packages
	# that pulled them in. This is helpful for troubleshooting
	# cases in which blockers don't solve automatically and
	# the reasons are not apparent from the normal merge list
	# display.
	for blocker in blockers:
		for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
			self._dynamic_config._blocker_parents.parent_nodes(blocker)):
			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
			if not parent_atoms:
				# Fall back to the recorded world/selected-set atom
				# when no parent atoms are known for this package.
				atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
				if atom is not None:
					parent_atoms = set([("@selected", atom)])
			conflict_pkgs[pkg] = parent_atoms

	# Reduce noise by pruning packages that are only
	# pulled in by other conflict packages.
	for pkg, parent_atoms in conflict_pkgs.items():
		relevant_parent = False
		for parent, atom in parent_atoms:
			if parent not in conflict_pkgs:
				relevant_parent = True
		if not relevant_parent:
			pruned_pkgs.add(pkg)
	for pkg in pruned_pkgs:
		del conflict_pkgs[pkg]

	for pkg, parent_atoms in conflict_pkgs.items():

		# Prefer packages that are not directly involved in a conflict.
		# It can be essential to see all the packages here, so don't
		# omit any. If the list is long, people can simply use a pager.
		preferred_parents = set()
		for parent_atom in parent_atoms:
			parent, atom = parent_atom
			if parent not in conflict_pkgs:
				preferred_parents.add(parent_atom)

		# Preferred parents first, then the remaining ones.
		ordered_list = list(preferred_parents)
		if len(parent_atoms) > len(ordered_list):
			for parent_atom in parent_atoms:
				if parent_atom not in preferred_parents:
					ordered_list.append(parent_atom)

		msg.append(indent + "%s pulled in by\n" % pkg)

		for parent_atom in ordered_list:
			parent, atom = parent_atom
			msg.append(2*indent)
			if isinstance(parent,
				(PackageArg, AtomArg)):
				# For PackageArg and AtomArg types, it's
				# redundant to display the atom attribute.
				msg.append(str(parent))
				# Display the specific atom from SetArg or
				# Package types.
				msg.append("%s required by %s" % (atom, parent))

	writemsg("".join(msg), noiselevel=-1)

	if "--quiet" not in self._frozen_config.myopts:
		show_blocker_docs_link()
5849 def display(self, mylist, favorites=[], verbosity=None):
# Render the merge list for the user by delegating to the module-level
# display() helper; before doing so, remember which list was shown.
# NOTE(review): the mutable default favorites=[] is shared across calls,
# but it is only read and passed through here, so it appears harmless —
# confirm that display() does not mutate it.
5851 # This is used to prevent display_problems() from
5852 # redundantly displaying this exact same merge list
5853 # again via _show_merge_list().
5854 self._dynamic_config._displayed_list = mylist
5857 return display(self, mylist, favorites, verbosity)
5859 def _display_autounmask(self):
5861 Display --autounmask message and optionally write it to config files
5862 (using CONFIG_PROTECT). The message includes the comments and the changes.
# Option flags controlling whether we only display the needed changes,
# prompt for them, or write them to the user's config files.
5865 autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
5866 autounmask_unrestricted_atoms = \
5867 self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
5868 quiet = "--quiet" in self._frozen_config.myopts
5869 pretend = "--pretend" in self._frozen_config.myopts
5870 ask = "--ask" in self._frozen_config.myopts
5871 enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
# Helper: decide whether pkg is the highest match overall and the highest
# within its slot, which determines whether a ">=" atom is safe to write.
5873 def check_if_latest(pkg):
5875 is_latest_in_slot = True
5876 dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
5877 root_config = self._frozen_config.roots[pkg.root]
5879 for db, pkg_type, built, installed, db_keys in dbs:
5880 for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
5881 if other_pkg.cp != pkg.cp:
5882 # old-style PROVIDE virtual means there are no
5883 # normal matches for this pkg_type
5887 if other_pkg.slot_atom == pkg.slot_atom:
5888 is_latest_in_slot = False
5891 # iter_match_pkgs yields highest version first, so
5892 # there's no need to search this pkg_type any further
5895 if not is_latest_in_slot:
5898 return is_latest, is_latest_in_slot
5900 #Set of roots we have autounmask changes for.
# Collect per-root keyword (KEYWORDS) changes for packages that need
# unstable or missing keywords accepted.
5903 masked_by_missing_keywords = False
5904 unstable_keyword_msg = {}
5905 for pkg in self._dynamic_config._needed_unstable_keywords:
5906 self._show_merge_list()
5907 if pkg in self._dynamic_config.digraph:
5910 unstable_keyword_msg.setdefault(root, [])
5911 is_latest, is_latest_in_slot = check_if_latest(pkg)
5912 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5913 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5914 use=self._pkg_use_enabled(pkg))
5915 for reason in mreasons:
5916 if reason.unmask_hint and \
5917 reason.unmask_hint.key == 'unstable keyword':
5918 keyword = reason.unmask_hint.value
5920 masked_by_missing_keywords = True
5922 unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
5923 if autounmask_unrestricted_atoms:
5925 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
5926 elif is_latest_in_slot:
5927 unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
5929 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
5931 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
# Collect per-root package.unmask changes, including the mask comment
# from the repository so the user sees why the package was masked.
5933 p_mask_change_msg = {}
5934 for pkg in self._dynamic_config._needed_p_mask_changes:
5935 self._show_merge_list()
5936 if pkg in self._dynamic_config.digraph:
5939 p_mask_change_msg.setdefault(root, [])
5940 is_latest, is_latest_in_slot = check_if_latest(pkg)
5941 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
5942 mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
5943 use=self._pkg_use_enabled(pkg))
5944 for reason in mreasons:
5945 if reason.unmask_hint and \
5946 reason.unmask_hint.key == 'p_mask':
5947 keyword = reason.unmask_hint.value
5949 comment, filename = portage.getmaskingreason(
5950 pkg.cpv, metadata=pkg.metadata,
5951 settings=pkgsettings,
5952 portdb=pkg.root_config.trees["porttree"].dbapi,
5953 return_location=True)
5955 p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
5957 p_mask_change_msg[root].append("# %s:\n" % filename)
5959 comment = [line for line in
5960 comment.splitlines() if line]
5961 for line in comment:
5962 p_mask_change_msg[root].append("%s\n" % line)
5963 if autounmask_unrestricted_atoms:
5965 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
5966 elif is_latest_in_slot:
5967 p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
5969 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
5971 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
# Collect per-root package.use changes; "-flag" entries disable a flag.
5973 use_changes_msg = {}
5974 for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
5975 self._show_merge_list()
5976 if pkg in self._dynamic_config.digraph:
5979 use_changes_msg.setdefault(root, [])
5980 is_latest, is_latest_in_slot = check_if_latest(pkg)
5981 changes = needed_use_config_change[1]
5983 for flag, state in changes.items():
5985 adjustments.append(flag)
5987 adjustments.append("-" + flag)
5988 use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
5990 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
5991 elif is_latest_in_slot:
5992 use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
5994 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
# Collect per-root package.license changes, sorted for stable output.
5997 for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
5998 self._show_merge_list()
5999 if pkg in self._dynamic_config.digraph:
6002 license_msg.setdefault(root, [])
6003 is_latest, is_latest_in_slot = check_if_latest(pkg)
6005 license_msg[root].append(self._get_dep_chain_as_comment(pkg))
6007 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6008 elif is_latest_in_slot:
6009 license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
6011 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
6013 def find_config_file(abs_user_config, file_name):
6015 Searches /etc/portage for an appropriate file to append changes to.
6016 If the file_name is a file it is returned, if it is a directory, the
6017 last file in it is returned. Order of traversal is the identical to
6018 portage.util.grablines(recursive=True).
6020 file_name - String containing a file name like "package.use"
6021 return value - String. Absolute path of file to write to. None if
6022 no suitable file exists.
6024 file_path = os.path.join(abs_user_config, file_name)
6028 except OSError as e:
6029 if e.errno == errno.ENOENT:
6030 # The file doesn't exist, so we'll
6034 # Disk or file system trouble?
6037 last_file_path = None
# Depth-first walk over the config path; regular files update
# last_file_path, directories are expanded (skipping VCS dirs,
# dotfiles and editor backups) so the last-graballed file wins.
6046 if stat.S_ISREG(st.st_mode):
6048 elif stat.S_ISDIR(st.st_mode):
6049 if os.path.basename(p) in _ignorecvs_dirs:
6052 contents = os.listdir(p)
6056 contents.sort(reverse=True)
6057 for child in contents:
6058 if child.startswith(".") or \
6059 child.endswith("~"):
6061 stack.append(os.path.join(p, child))
6063 return last_file_path
6065 write_to_file = autounmask_write and not pretend
6066 #Make sure we have a file to write to before doing any write.
6067 file_to_write_to = {}
6071 settings = self._frozen_config.roots[root].settings
6072 abs_user_config = os.path.join(
6073 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6075 if root in unstable_keyword_msg:
# Prefer the newer package.accept_keywords name unless the legacy
# package.keywords already exists.
6076 if not os.path.exists(os.path.join(abs_user_config,
6077 "package.keywords")):
6078 filename = "package.accept_keywords"
6080 filename = "package.keywords"
6081 file_to_write_to[(abs_user_config, "package.keywords")] = \
6082 find_config_file(abs_user_config, filename)
6084 if root in p_mask_change_msg:
6085 file_to_write_to[(abs_user_config, "package.unmask")] = \
6086 find_config_file(abs_user_config, "package.unmask")
6088 if root in use_changes_msg:
6089 file_to_write_to[(abs_user_config, "package.use")] = \
6090 find_config_file(abs_user_config, "package.use")
6092 if root in license_msg:
6093 file_to_write_to[(abs_user_config, "package.license")] = \
6094 find_config_file(abs_user_config, "package.license")
# If any target file could not be resolved, report it and disable writing.
6096 for (abs_user_config, f), path in file_to_write_to.items():
6098 problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
6100 write_to_file = not problems
# Helper: colorize comment lines ("#"-prefixed) of a change list
# before displaying it; mutates the list in place.
6102 def format_msg(lines):
6104 for i, line in enumerate(lines):
6105 if line.startswith("#"):
6107 lines[i] = colorize("INFORM", line.rstrip()) + "\n"
6108 return "".join(lines)
6111 settings = self._frozen_config.roots[root].settings
6112 abs_user_config = os.path.join(
6113 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6116 writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
6118 if root in unstable_keyword_msg:
6119 writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
6120 " are necessary to proceed:\n", noiselevel=-1)
6121 writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
6123 if root in p_mask_change_msg:
6124 writemsg("\nThe following " + colorize("BAD", "mask changes") + \
6125 " are necessary to proceed:\n", noiselevel=-1)
6126 writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
6128 if root in use_changes_msg:
6129 writemsg("\nThe following " + colorize("BAD", "USE changes") + \
6130 " are necessary to proceed:\n", noiselevel=-1)
6131 writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
6133 if root in license_msg:
6134 writemsg("\nThe following " + colorize("BAD", "license changes") + \
6135 " are necessary to proceed:\n", noiselevel=-1)
6136 writemsg(format_msg(license_msg[root]), noiselevel=-1)
# Build per-root ConfigProtect helpers so writes respect
# CONFIG_PROTECT / CONFIG_PROTECT_MASK.
6141 settings = self._frozen_config.roots[root].settings
6142 protect_obj[root] = ConfigProtect(settings["EROOT"], \
6143 shlex_split(settings.get("CONFIG_PROTECT", "")),
6144 shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
# Helper: append `changes` to the resolved config file, reading the
# existing contents first and diverting to a ._cfg* name when the
# target is CONFIG_PROTECTed; failures are recorded in `problems`.
6146 def write_changes(root, changes, file_to_write_to):
6147 file_contents = None
6149 file_contents = io.open(
6150 _unicode_encode(file_to_write_to,
6151 encoding=_encodings['fs'], errors='strict'),
6152 mode='r', encoding=_encodings['content'],
6153 errors='replace').readlines()
6154 except IOError as e:
6155 if e.errno == errno.ENOENT:
6158 problems.append("!!! Failed to read '%s': %s\n" % \
6159 (file_to_write_to, e))
6160 if file_contents is not None:
6161 file_contents.extend(changes)
6162 if protect_obj[root].isprotected(file_to_write_to):
6163 # We want to force new_protect_filename to ensure
6164 # that the user will see all our changes via
6165 # dispatch-conf, even if file_to_write_to doesn't
6166 # exist yet, so we specify force=True.
6167 file_to_write_to = new_protect_filename(file_to_write_to,
6170 write_atomic(file_to_write_to, "".join(file_contents))
6171 except PortageException:
6172 problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
6174 if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
6177 "NOTE: The --autounmask-keep-masks option will prevent emerge",
6178 " from creating package.unmask or ** keyword changes."
6182 line = colorize("INFORM", line)
6183 writemsg(line + "\n", noiselevel=-1)
# With --ask, confirm before touching any config file.
6185 if ask and write_to_file and file_to_write_to:
6186 prompt = "\nWould you like to add these " + \
6187 "changes to your config files?"
6188 if userquery(prompt, enter_invalid) == 'No':
6189 write_to_file = False
6191 if write_to_file and file_to_write_to:
6193 settings = self._frozen_config.roots[root].settings
6194 abs_user_config = os.path.join(
6195 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
6196 ensure_dirs(abs_user_config)
6198 if root in unstable_keyword_msg:
6199 write_changes(root, unstable_keyword_msg[root],
6200 file_to_write_to.get((abs_user_config, "package.keywords")))
6202 if root in p_mask_change_msg:
6203 write_changes(root, p_mask_change_msg[root],
6204 file_to_write_to.get((abs_user_config, "package.unmask")))
6206 if root in use_changes_msg:
6207 write_changes(root, use_changes_msg[root],
6208 file_to_write_to.get((abs_user_config, "package.use")))
6210 if root in license_msg:
6211 write_changes(root, license_msg[root],
6212 file_to_write_to.get((abs_user_config, "package.license")))
6215 writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
6217 writemsg("".join(problems), noiselevel=-1)
6218 elif write_to_file and roots:
6219 writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
6221 elif not pretend and not autounmask_write and roots:
6222 writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
6226 def display_problems(self):
6228 Display problems with the dependency graph such as slot collisions.
6229 This is called internally by display() to show the problems _after_
6230 the merge list where it is most likely to be seen, but if display()
6231 is not going to be called then this method should be called explicitly
6232 to ensure that the user is notified of problems with the graph.
6235 if self._dynamic_config._circular_deps_for_display is not None:
6236 self._show_circular_deps(
6237 self._dynamic_config._circular_deps_for_display)
6239 # The slot conflict display has better noise reduction than
6240 # the unsatisfied blockers display, so skip unsatisfied blockers
6241 # display if there are slot conflicts (see bug #385391).
6242 if self._dynamic_config._slot_collision_info:
6243 self._show_slot_collision_notice()
6244 elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
6245 self._show_unsatisfied_blockers(
6246 self._dynamic_config._unsatisfied_blockers_for_display)
6248 self._show_missed_update()
6250 self._show_ignored_binaries()
6252 self._display_autounmask()
6254 # TODO: Add generic support for "set problem" handlers so that
6255 # the below warnings aren't special cases for world only.
6257 if self._dynamic_config._missing_args:
6258 world_problems = False
6259 if "world" in self._dynamic_config.sets[
6260 self._frozen_config.target_root].sets:
6261 # Filter out indirect members of world (from nested sets)
6262 # since only direct members of world are desired here.
6263 world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
6264 for arg, atom in self._dynamic_config._missing_args:
6265 if arg.name in ("selected", "world") and atom in world_set:
6266 world_problems = True
6270 sys.stderr.write("\n!!! Problems have been " + \
6271 "detected with your world file\n")
6272 sys.stderr.write("!!! Please run " + \
6273 green("emaint --check world")+"\n\n")
6275 if self._dynamic_config._missing_args:
6276 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
6277 " Ebuilds for the following packages are either all\n")
6278 sys.stderr.write(colorize("BAD", "!!!") + \
6279 " masked or don't exist:\n")
6280 sys.stderr.write(" ".join(str(atom) for arg, atom in \
6281 self._dynamic_config._missing_args) + "\n")
# Warn about requested packages that are suppressed by
# package.provided entries, grouping by (arg, atom) with the
# parents (sets) that pulled each one in.
6283 if self._dynamic_config._pprovided_args:
6285 for arg, atom in self._dynamic_config._pprovided_args:
6286 if isinstance(arg, SetArg):
6288 arg_atom = (atom, atom)
6291 arg_atom = (arg.arg, atom)
6292 refs = arg_refs.setdefault(arg_atom, [])
6293 if parent not in refs:
6296 msg.append(bad("\nWARNING: "))
6297 if len(self._dynamic_config._pprovided_args) > 1:
6298 msg.append("Requested packages will not be " + \
6299 "merged because they are listed in\n")
6301 msg.append("A requested package will not be " + \
6302 "merged because it is listed in\n")
6303 msg.append("package.provided:\n\n")
6304 problems_sets = set()
6305 for (arg, atom), refs in arg_refs.items():
6308 problems_sets.update(refs)
6310 ref_string = ", ".join(["'%s'" % name for name in refs])
6311 ref_string = " pulled in by " + ref_string
6312 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
6314 if "selected" in problems_sets or "world" in problems_sets:
6315 msg.append("This problem can be solved in one of the following ways:\n\n")
6316 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
6317 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
6318 msg.append(" C) Remove offending entries from package.provided.\n\n")
6319 msg.append("The best course of action depends on the reason that an offending\n")
6320 msg.append("package.provided entry exists.\n\n")
6321 sys.stderr.write("".join(msg))
# List updates masked purely by LICENSE changes.
6323 masked_packages = []
6324 for pkg in self._dynamic_config._masked_license_updates:
6325 root_config = pkg.root_config
6326 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
6327 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
6328 masked_packages.append((root_config, pkgsettings,
6329 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
6331 writemsg("\n" + colorize("BAD", "!!!") + \
6332 " The following updates are masked by LICENSE changes:\n",
6334 show_masked_packages(masked_packages)
6336 writemsg("\n", noiselevel=-1)
# List installed packages that are currently masked.
6338 masked_packages = []
6339 for pkg in self._dynamic_config._masked_installed:
6340 root_config = pkg.root_config
6341 pkgsettings = self._frozen_config.pkgsettings[pkg.root]
# NOTE(review): unlike line 6327 above, this passes the bound method
# self._pkg_use_enabled itself (uncalled) as `use=` — likely it was
# meant to be self._pkg_use_enabled(pkg); confirm get_masking_status's
# expected `use` argument before changing.
6342 mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
6343 masked_packages.append((root_config, pkgsettings,
6344 pkg.cpv, pkg.repo, pkg.metadata, mreasons))
6346 writemsg("\n" + colorize("BAD", "!!!") + \
6347 " The following installed packages are masked:\n",
6349 show_masked_packages(masked_packages)
6351 writemsg("\n", noiselevel=-1)
6353 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
6354 self._show_unsatisfied_dep(*pargs, **kwargs)
6356 def saveNomergeFavorites(self):
6357 """Find atoms in favorites that are not in the mergelist and add them
6358 to the world file if necessary."""
# Any of these options means the world file must not be modified.
6359 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
6360 "--oneshot", "--onlydeps", "--pretend"):
6361 if x in self._frozen_config.myopts:
6363 root_config = self._frozen_config.roots[self._frozen_config.target_root]
6364 world_set = root_config.sets["selected"]
6366 world_locked = False
6367 if hasattr(world_set, "lock"):
6371 if hasattr(world_set, "load"):
6372 world_set.load() # maybe it's changed on disk
6374 args_set = self._dynamic_config.sets[
6375 self._frozen_config.target_root].sets['__non_set_args__']
6376 added_favorites = set()
# Collect world-file atoms for graph nodes that will not be merged.
6377 for x in self._dynamic_config._set_nodes:
6378 if x.operation != "nomerge":
6381 if x.root != root_config.root:
6385 myfavkey = create_world_atom(x, args_set, root_config)
6387 if myfavkey in added_favorites:
6389 added_favorites.add(myfavkey)
6390 except portage.exception.InvalidDependString as e:
6391 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
6392 (x.cpv, e), noiselevel=-1)
6393 writemsg("!!! see '%s'\n\n" % os.path.join(
6394 x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
# Also record requested sets (by @name) that qualify as world candidates.
6397 for arg in self._dynamic_config._initial_arg_list:
6398 if not isinstance(arg, SetArg):
6400 if arg.root_config.root != root_config.root:
6403 if k in ("selected", "world") or \
6404 not root_config.sets[k].world_candidate:
6409 all_added.append(SETPREFIX + k)
6410 all_added.extend(added_favorites)
6414 ">>> Recording %s in \"world\" favorites file...\n" % \
6415 colorize("INFORM", str(a)), noiselevel=-1)
6417 world_set.update(all_added)
6422 def _loadResumeCommand(self, resume_data, skip_masked=True,
6425 Add a resume command to the graph and validate it in the process. This
6426 will raise a PackageNotFound exception if a package is not available.
# Validate the overall shape of the saved resume data before using it.
6431 if not isinstance(resume_data, dict):
6434 mergelist = resume_data.get("mergelist")
6435 if not isinstance(mergelist, list):
6438 favorites = resume_data.get("favorites")
6439 if isinstance(favorites, list):
6440 args = self._load_favorites(favorites)
6444 fakedb = self._dynamic_config.mydbapi
6445 serialized_tasks = []
# Re-resolve each serialized mergelist entry of the form
# [pkg_type, root, cpv, action] back into a Package instance.
6448 if not (isinstance(x, list) and len(x) == 4):
6450 pkg_type, myroot, pkg_key, action = x
6451 if pkg_type not in self.pkg_tree_map:
6453 if action != "merge":
6455 root_config = self._frozen_config.roots[myroot]
6457 # Use the resume "favorites" list to see if a repo was specified
6459 depgraph_sets = self._dynamic_config.sets[root_config.root]
6461 for atom in depgraph_sets.atoms.getAtoms():
6462 if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
6466 atom = "=" + pkg_key
6468 atom = atom + _repo_separator + repo
6471 atom = Atom(atom, allow_repo=True)
6476 for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
6477 if not self._pkg_visibility_check(pkg) or \
6478 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
6479 modified_use=self._pkg_use_enabled(pkg)):
6484 # It does not exist or it is corrupt.
6486 # TODO: log these somewhere
6488 raise portage.exception.PackageNotFound(pkg_key)
6490 if "merge" == pkg.operation and \
6491 self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
6492 modified_use=self._pkg_use_enabled(pkg)):
6495 if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
6497 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6499 self._dynamic_config._unsatisfied_deps_for_display.append(
6500 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6502 fakedb[myroot].cpv_inject(pkg)
6503 serialized_tasks.append(pkg)
6504 self._spinner_update()
6506 if self._dynamic_config._unsatisfied_deps_for_display:
6509 if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
6510 self._dynamic_config._serialized_tasks_cache = serialized_tasks
6511 self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
6513 self._select_package = self._select_pkg_from_graph
6514 self._dynamic_config.myparams["selective"] = True
6515 # Always traverse deep dependencies in order to account for
6516 # potentially unsatisfied dependencies of installed packages.
6517 # This is necessary for correct --keep-going or --resume operation
6518 # in case a package from a group of circularly dependent packages
6519 # fails. In this case, a package which has recently been installed
6520 # may have an unsatisfied circular dependency (pulled in by
6521 # PDEPEND, for example). So, even though a package is already
6522 # installed, it may not have all of its dependencies satisfied, so
6523 # it may not be usable. If such a package is in the subgraph of
6524 # deep dependencies of a scheduled build, that build needs to
6525 # be cancelled. In order for this type of situation to be
6526 # recognized, deep traversal of dependencies is required.
6527 self._dynamic_config.myparams["deep"] = True
6529 for task in serialized_tasks:
6530 if isinstance(task, Package) and \
6531 task.operation == "merge":
6532 if not self._add_pkg(task, None):
6535 # Packages for argument atoms need to be explicitly
6536 # added via _add_pkg() so that they are included in the
6537 # digraph (needed at least for --tree display).
6538 for arg in self._expand_set_args(args, add_to_digraph=True):
6539 for atom in arg.pset.getAtoms():
6540 pkg, existing_node = self._select_package(
6541 arg.root_config.root, atom)
6542 if existing_node is None and \
6544 if not self._add_pkg(pkg, Dependency(atom=atom,
6545 root=pkg.root, parent=arg)):
6548 # Allow unsatisfied deps here to avoid showing a masking
6549 # message for an unsatisfied dep that isn't necessarily
6551 if not self._create_graph(allow_unsatisfied=True):
# Deps of packages scheduled for merge are always significant.
6554 unsatisfied_deps = []
6555 for dep in self._dynamic_config._unsatisfied_deps:
6556 if not isinstance(dep.parent, Package):
6558 if dep.parent.operation == "merge":
6559 unsatisfied_deps.append(dep)
6562 # For unsatisfied deps of installed packages, only account for
6563 # them if they are in the subgraph of dependencies of a package
6564 # which is scheduled to be installed.
6565 unsatisfied_install = False
# Walk ancestors of the dep's parent, looking for any node that
# is scheduled to be merged; `traversed` guards against cycles.
6567 dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
6569 node = dep_stack.pop()
6570 if not isinstance(node, Package):
6572 if node.operation == "merge":
6573 unsatisfied_install = True
6575 if node in traversed:
6578 dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
6580 if unsatisfied_install:
6581 unsatisfied_deps.append(dep)
6583 if masked_tasks or unsatisfied_deps:
6584 # This probably means that a required package
6585 # was dropped via --skipfirst. It makes the
6586 # resume list invalid, so convert it to a
6587 # UnsatisfiedResumeDep exception.
6588 raise self.UnsatisfiedResumeDep(self,
6589 masked_tasks + unsatisfied_deps)
6590 self._dynamic_config._serialized_tasks_cache = None
6593 except self._unknown_internal_error:
6598 def _load_favorites(self, favorites):
6600 Use a list of favorites to resume state from a
6601 previous select_files() call. This creates similar
6602 DependencyArg instances to those that would have
6603 been created by the original select_files() call.
6604 This allows Package instances to be matched with
6605 DependencyArg instances during graph creation.
6607 root_config = self._frozen_config.roots[self._frozen_config.target_root]
6608 sets = root_config.sets
6609 depgraph_sets = self._dynamic_config.sets[root_config.root]
# Each favorite is either a "@set" reference (SetArg) or a plain
# atom string (AtomArg); anything else is skipped.
6612 if not isinstance(x, basestring):
6614 if x in ("system", "world"):
6616 if x.startswith(SETPREFIX):
6617 s = x[len(SETPREFIX):]
6620 if s in depgraph_sets.sets:
6623 depgraph_sets.sets[s] = pset
6624 args.append(SetArg(arg=x, pset=pset,
6625 root_config=root_config))
# Plain atom; invalid atoms are ignored rather than fatal here.
6628 x = Atom(x, allow_repo=True)
6629 except portage.exception.InvalidAtom:
6631 args.append(AtomArg(arg=x, atom=x,
6632 root_config=root_config))
6634 self._set_args(args)
6637 class UnsatisfiedResumeDep(portage.exception.PortageException):
6639 A dependency of a resume list is not installed. This
6640 can occur when a required package is dropped from the
6641 merge list via --skipfirst.
6643 def __init__(self, depgraph, value):
6644 portage.exception.PortageException.__init__(self, value)
# Keep a back-reference so the handler catching this exception can
# inspect the depgraph state that produced it.
6645 self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions the depgraph raises purely for internal
	control flow; these are not meant to escape to external callers."""

	def __init__(self, value=""):
		# Unlike the base class, the message defaults to an empty string.
		portage.exception.PortageException.__init__(self, value)
6651 class _unknown_internal_error(_internal_exception):
# Raised to abort graph creation after the details have already been
# written to stderr; carries no machine-readable reason itself.
6653 Used by the depgraph internally to terminate graph creation.
6654 The specific reason for the failure should have been dumped
6655 to stderr, unfortunately, the exact reason for the failure
6659 class _serialize_tasks_retry(_internal_exception):
# Control-flow signal: _serialize_tasks() must be invoked again.
6661 This is raised by the _serialize_tasks() method when it needs to
6662 be called again for some reason. The only case that it's currently
6663 used for is when neglected dependencies need to be added to the
6664 graph in order to avoid making a potentially unsafe decision.
6667 class _backtrack_mask(_internal_exception):
# Signals that a matching package was masked by backtracking.
6669 This is raised by _show_unsatisfied_dep() when it's called with
6670 check_backtrack=True and a matching package has been masked by
6674 class _autounmask_breakage(_internal_exception):
# Signals that autounmask changes disqualified a matching package.
6676 This is raised by _show_unsatisfied_dep() when it's called with
6677 check_autounmask_breakage=True and a matching package has been
6678 disqualified due to autounmask changes.
def need_restart(self):
	"""Return True when a restart has been requested on the dynamic
	config and restarting has not been explicitly suppressed."""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_restart and \
		not dynamic_config._skip_restart
def success_without_autounmask(self):
	"""Report whether resolution would have succeeded without the
	recorded autounmask changes (flag kept on the dynamic config)."""
	dynamic_config = self._dynamic_config
	return dynamic_config._success_without_autounmask
6688 def autounmask_breakage_detected(self):
# Probe each recorded unsatisfied dep by re-running the display logic
# in check-only mode; the _autounmask_breakage exception doubles as
# the positive detection signal here.
6690 for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
6691 self._show_unsatisfied_dep(
6692 *pargs, check_autounmask_breakage=True, **kwargs)
6693 except self._autounmask_breakage:
def get_backtrack_infos(self):
	"""Expose the backtracking parameters accumulated on the dynamic
	config (consumed by the backtracker)."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
6701 class _dep_check_composite_db(dbapi):
6703 A dbapi-like interface that is optimized for use in dep_check() calls.
6704 This is built on top of the existing depgraph package selection logic.
6705 Some packages that have been added to the graph may be masked from this
6706 view in order to influence the atom preference selection that occurs
6709 def __init__(self, depgraph, root):
6710 dbapi.__init__(self)
6711 self._depgraph = depgraph
6713 self._match_cache = {}
6714 self._cpv_pkg_map = {}
6716 def _clear_cache(self):
6717 self._match_cache.clear()
6718 self._cpv_pkg_map.clear()
6720 def cp_list(self, cp):
6722 Emulate cp_list just so it can be used to check for existence
6723 of new-style virtuals. Since it's a waste of time to return
6724 more than one cpv for this use case, a maximum of one cpv will
6727 if isinstance(cp, Atom):
6732 for pkg in self._depgraph._iter_match_pkgs_any(
6733 self._depgraph._frozen_config.roots[self._root], atom):
6740 def match(self, atom):
6741 cache_key = (atom, atom.unevaluated_atom)
6742 ret = self._match_cache.get(cache_key)
6747 pkg, existing = self._depgraph._select_package(self._root, atom)
6749 if pkg is not None and self._visible(pkg):
6750 self._cpv_pkg_map[pkg.cpv] = pkg
6753 if pkg is not None and \
6754 atom.slot is None and \
6755 pkg.cp.startswith("virtual/") and \
6756 (("remove" not in self._depgraph._dynamic_config.myparams and
6757 "--update" not in self._depgraph._frozen_config.myopts) or
6759 not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
6760 # For new-style virtual lookahead that occurs inside dep_check()
6761 # for bug #141118, examine all slots. This is needed so that newer
6762 # slots will not unnecessarily be pulled in when a satisfying lower
6763 # slot is already installed. For example, if virtual/jdk-1.5 is
6764 # satisfied via gcj-jdk then there's no need to pull in a newer
6765 # slot to satisfy a virtual/jdk dependency, unless --update is
6769 for virt_pkg in self._depgraph._iter_match_pkgs_any(
6770 self._depgraph._frozen_config.roots[self._root], atom):
6771 if virt_pkg.cp != pkg.cp:
6773 slots.add(virt_pkg.slot)
6775 slots.remove(pkg.slot)
6777 slot_atom = atom.with_slot(slots.pop())
6778 pkg, existing = self._depgraph._select_package(
6779 self._root, slot_atom)
6782 if not self._visible(pkg):
6784 self._cpv_pkg_map[pkg.cpv] = pkg
6788 self._cpv_sort_ascending(ret)
6790 self._match_cache[cache_key] = ret
6793 def _visible(self, pkg):
6794 if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
6796 arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
6797 except (StopIteration, portage.exception.InvalidDependString):
6801 if pkg.installed and \
6802 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
6803 # Account for packages with masks (like KEYWORDS masks)
6804 # that are usually ignored in visibility checks for
6805 # installed packages, in order to handle cases like
6807 myopts = self._depgraph._frozen_config.myopts
6808 use_ebuild_visibility = myopts.get(
6809 '--use-ebuild-visibility', 'n') != 'n'
6810 avoid_update = "--update" not in myopts and \
6811 "remove" not in self._depgraph._dynamic_config.myparams
6812 usepkgonly = "--usepkgonly" in myopts
6813 if not avoid_update:
6814 if not use_ebuild_visibility and usepkgonly:
6818 pkg_eb = self._depgraph._pkg(
6819 pkg.cpv, "ebuild", pkg.root_config,
6821 except portage.exception.PackageNotFound:
6822 pkg_eb_visible = False
6823 for pkg_eb in self._depgraph._iter_match_pkgs(
6824 pkg.root_config, "ebuild",
6825 Atom("=%s" % (pkg.cpv,))):
6826 if self._depgraph._pkg_visibility_check(pkg_eb):
6827 pkg_eb_visible = True
6829 if not pkg_eb_visible:
6832 if not self._depgraph._pkg_visibility_check(pkg_eb):
6835 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
6836 self._root].get(pkg.slot_atom)
6837 if in_graph is None:
6838 # Mask choices for packages which are not the highest visible
6839 # version within their slot (since they usually trigger slot
6841 highest_visible, in_graph = self._depgraph._select_package(
6842 self._root, pkg.slot_atom)
6843 # Note: highest_visible is not necessarily the real highest
6844 # visible, especially when --update is not enabled, so use
6845 # < operator instead of !=.
6846 if highest_visible is not None and pkg < highest_visible:
6848 elif in_graph != pkg:
6849 # Mask choices for packages that would trigger a slot
6850 # conflict with a previously selected package.
def aux_get(self, cpv, wants):
	"""Return metadata values for the given cpv, one value per key in wants.

	Keys absent from the package's metadata yield the empty string,
	mirroring the dbapi.aux_get contract for this in-memory package map.
	"""
	pkg_metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
def match_pkgs(self, atom):
	"""Return the Package instances whose cpvs match the given atom."""
	matched_cpvs = self.match(atom)
	return list(map(self._cpv_pkg_map.__getitem__, matched_cpvs))
# Inform the user that the short ebuild name `arg` matches more than one
# category/package.  In --quiet mode only the fully-qualified names are
# listed; otherwise a `search` instance is used to display richer
# information about each candidate before repeating the warning.
# NOTE(review): the original line numbers in this listing are not
# contiguous (6867 -> 6870, 6873 -> 6875, 6877 -> 6880), so some
# statements — presumably the early return after the quiet branch, the
# closing argument of insert_category_into_atom, and the calls that add
# each cp to the search object and emit its results — are not visible
# here; confirm against the full source before editing.
6861 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
6863 if "--quiet" in myopts:
6864 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
6865 writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
# Deduplicate the category/package names and list them sorted.
6866 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
6867 writemsg("    " + colorize("INFORM", cp) + "\n", noiselevel=-1)
# Non-quiet path: build a search object so each candidate can be shown
# with description details.
6870 s = search(root_config, spinner, "--searchdesc" in myopts,
6871 "--quiet" not in myopts, "--usepkg" in myopts,
6872 "--usepkgonly" in myopts)
6873 null_cp = portage.dep_getkey(insert_category_into_atom(
6875 cat, atom_pn = portage.catsplit(null_cp)
6876 s.searchkey = atom_pn
6877 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
6880 writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
6881 writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
# Print the "These are the packages that would be ..." header (for
# pretend/ask/tree/verbose runs) and configure the spinner before the
# dependency calculation begins.
# NOTE(review): line numbers jump (6883 -> 6886, 6888 -> 6890, ...), so
# the `if spinner is None: return` style guard, the assignments that set
# `action`, and the `else:` lines separating the writemsg_stdout
# branches appear to be elided from this listing.
6883 def _spinner_start(spinner, myopts):
# Only print the header for display-oriented invocations.
6886 if "--quiet" not in myopts and \
6887 ("--pretend" in myopts or "--ask" in myopts or \
6888 "--tree" in myopts or "--verbose" in myopts):
# `action` describes what would happen to the listed packages; its
# assignments are not visible in this listing.
6890 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
6892 elif "--buildpkgonly" in myopts:
6896 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
6897 if "--unordered-display" in myopts:
6898 portage.writemsg_stdout("\n" + \
6899 darkgreen("These are the packages that " + \
6900 "would be %s:" % action) + "\n\n")
6902 portage.writemsg_stdout("\n" + \
6903 darkgreen("These are the packages that " + \
6904 "would be %s, in reverse order:" % action) + "\n\n")
6906 portage.writemsg_stdout("\n" + \
6907 darkgreen("These are the packages that " + \
6908 "would be %s, in order:" % action) + "\n\n")
# Silence the spinner for quiet/nodeps runs; otherwise announce the
# calculation the spinner will animate.
6910 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
6911 if not show_spinner:
6912 spinner.update = spinner.update_quiet
6915 portage.writemsg_stdout("Calculating dependencies  ")
# Finish the spinner output started by _spinner_start: erase the last
# spinner glyph (unless using the non-tty update_basic style) and print
# the completion message.
# NOTE(review): the `return` that should terminate the first branch is
# elided from this listing (line numbers jump 6919 -> 6922).
6917 def _spinner_stop(spinner):
6918 if spinner is None or \
6919 spinner.update == spinner.update_quiet:
6922 if spinner.update != spinner.update_basic:
6923 # update_basic is used for non-tty output,
6924 # so don't output backspaces in that case.
6925 portage.writemsg_stdout("\b\b")
6927 portage.writemsg_stdout("... done!\n")
# Public entry point: run the backtracking dependency calculation with
# spinner start/stop wrapped around it and return its
# (success, depgraph, favorites) result.
# NOTE(review): the docstring quotes and the try/finally that pairs
# _spinner_start with _spinner_stop are elided from this listing
# (line numbers jump 6930 -> 6932 -> 6934 -> 6936, 6937 -> 6939).
6929 def backtrack_depgraph(settings, trees, myopts, myparams,
6930 myaction, myfiles, spinner):
6932 Raises PackageSetNotFound if myfiles contains a missing package set.
6934 _spinner_start(spinner, myopts)
6936 return _backtrack_depgraph(settings, trees, myopts, myparams,
6937 myaction, myfiles, spinner)
6939 _spinner_stop(spinner)
# Core backtracking loop: repeatedly build a depgraph, feeding failure
# information into a Backtracker, until the calculation succeeds, the
# retry budget is exhausted, or backtracking cannot continue.
# Returns (success, mydepgraph, favorites).
6942 def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
6944 debug = "--debug" in myopts
# --backtrack bounds the number of retries (default 10).
6946 max_retries = myopts.get('--backtrack', 10)
# NOTE(review): `/` was integer division under Python 2; under Python 3
# it yields a float, so `//` may be intended here — confirm what
# Backtracker expects max_depth to be.
6947 max_depth = max(1, (max_retries + 1) / 2)
6948 allow_backtracking = max_retries > 0
6949 backtracker = Backtracker(max_depth)
# NOTE(review): the `backtracked` counter initialization and the retry
# loop header are elided from this listing (6949 -> 6952 -> 6957).
6952 frozen_config = _frozen_depgraph_config(settings, trees,
6957 if debug and mydepgraph is not None:
6959 "\n\nbacktracking try %s \n\n" % \
6960 backtracked, noiselevel=-1, level=logging.DEBUG)
6961 mydepgraph.display_problems()
# Pull the next set of backtrack parameters and build a fresh depgraph.
6963 backtrack_parameters = backtracker.get()
6965 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
6966 frozen_config=frozen_config,
6967 allow_backtracking=allow_backtracking,
6968 backtrack_parameters=backtrack_parameters)
6969 success, favorites = mydepgraph.select_files(myfiles)
# Stop on success, or when backtracking is disabled/exhausted; the
# branch bodies (break/feedback) are partially elided here.
6971 if success or mydepgraph.success_without_autounmask():
6973 elif not allow_backtracking:
6975 elif backtracked >= max_retries:
6977 elif mydepgraph.need_restart():
# Let the backtracker digest the failure before the next attempt.
6979 backtracker.feedback(mydepgraph.get_backtrack_infos())
# If every attempt failed after at least one backtrack, rebuild once
# with backtracking disabled using the best run found, so the most
# useful problems are the ones displayed to the user.
6983 if not (success or mydepgraph.success_without_autounmask()) and backtracked:
6987 "\n\nbacktracking aborted after %s tries\n\n" % \
6988 backtracked, noiselevel=-1, level=logging.DEBUG)
6989 mydepgraph.display_problems()
6991 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
6992 frozen_config=frozen_config,
6993 allow_backtracking=False,
6994 backtrack_parameters=backtracker.get_best_run())
6995 success, favorites = mydepgraph.select_files(myfiles)
# If autounmask changes themselves broke the calculation, retry once
# more with --autounmask forced off.
6997 if not success and mydepgraph.autounmask_breakage_detected():
7000 "\n\nautounmask breakage detected\n\n",
7001 noiselevel=-1, level=logging.DEBUG)
7002 mydepgraph.display_problems()
7003 myopts["--autounmask"] = "n"
7004 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
7005 frozen_config=frozen_config, allow_backtracking=False)
7006 success, favorites = mydepgraph.select_files(myfiles)
7008 return (success, mydepgraph, favorites)
# Public entry point for rebuilding a depgraph from the mtimedb resume
# list, with spinner start/stop wrapped around it.
# NOTE(review): the docstring quotes, the trailing arguments of the
# _resume_depgraph call, and the try/finally pairing _spinner_start with
# _spinner_stop are elided from this listing (7017 -> 7020).
7011 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7013 Raises PackageSetNotFound if myfiles contains a missing package set.
7015 _spinner_start(spinner, myopts)
7017 return _resume_depgraph(settings, trees, mtimedb, myopts,
7020 _spinner_stop(spinner)
# Rebuild a depgraph from a saved resume mergelist, pruning entries
# whose dependencies can no longer be satisfied and collecting them in
# dropped_tasks.
# NOTE(review): this listing has elided lines throughout (e.g. the
# skip_masked assignment, the try: around _loadResumeCommand, the raise
# for non-skippable failures, the retry-loop header, and several
# continue statements) — confirm control flow against the full source.
7022 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
7024 Construct a depgraph for the given resume list. This will raise
7025 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
7026 TODO: Return reasons for dropped_tasks, for display/logging.
7028 @return: (success, depgraph, dropped_tasks)
7031 skip_unsatisfied = True
7032 mergelist = mtimedb["resume"]["mergelist"]
7033 dropped_tasks = set()
7034 frozen_config = _frozen_depgraph_config(settings, trees,
7037 mydepgraph = depgraph(settings, trees,
7038 myopts, myparams, spinner, frozen_config=frozen_config)
7040 success = mydepgraph._loadResumeCommand(mtimedb["resume"],
7041 skip_masked=skip_masked)
7042 except depgraph.UnsatisfiedResumeDep as e:
7043 if not skip_unsatisfied:
# Collect every package whose dependencies are unsatisfied, then walk
# the graph upward so their to-be-merged parents are dropped too.
7046 graph = mydepgraph._dynamic_config.digraph
7047 unsatisfied_parents = dict((dep.parent, dep.parent) \
7049 traversed_nodes = set()
7050 unsatisfied_stack = list(unsatisfied_parents)
7051 while unsatisfied_stack:
7052 pkg = unsatisfied_stack.pop()
7053 if pkg in traversed_nodes:
7055 traversed_nodes.add(pkg)
7057 # If this package was pulled in by a parent
7058 # package scheduled for merge, removing this
7059 # package may cause the parent package's
7060 # dependency to become unsatisfied.
7061 for parent_node in graph.parent_nodes(pkg):
7062 if not isinstance(parent_node, Package) \
7063 or parent_node.operation not in ("merge", "nomerge"):
7065 # We need to traverse all priorities here, in order to
7066 # ensure that a package with an unsatisfied dependency
7067 # won't get pulled in, even indirectly via a soft
7069 unsatisfied_parents[parent_node] = parent_node
7070 unsatisfied_stack.append(parent_node)
# Drop the unsatisfied parents from the resume mergelist.
7072 unsatisfied_tuples = frozenset(tuple(parent_node)
7073 for parent_node in unsatisfied_parents
7074 if isinstance(parent_node, Package))
7075 pruned_mergelist = []
7077 if isinstance(x, list) and \
7078 tuple(x) not in unsatisfied_tuples:
7079 pruned_mergelist.append(x)
7081 # If the mergelist doesn't shrink then this loop is infinite.
7082 if len(pruned_mergelist) == len(mergelist):
7083 # This happens if a package can't be dropped because
7084 # it's already installed, but it has unsatisfied PDEPEND.
7086 mergelist[:] = pruned_mergelist
7088 # Exclude installed packages that have been removed from the graph due
7089 # to failure to build/install runtime dependencies after the dependent
7090 # package has already been installed.
7091 dropped_tasks.update(pkg for pkg in \
7092 unsatisfied_parents if pkg.operation != "nomerge")
# Release the traversal state before retrying the load.
7094 del e, graph, traversed_nodes, \
7095 unsatisfied_parents, unsatisfied_stack
7099 return (success, mydepgraph, dropped_tasks)
# Compute (metadata, mask_reasons) for a single package, tolerating
# corrupt metadata and unsupported EAPIs.
# NOTE(review): elided lines include the try/except around aux_get
# (which presumably sets metadata = None on failure) and the branch
# structure between the corruption / EAPI / Package cases
# (7106-7108, 7111, 7115, 7118-7119, 7122, 7124).
7101 def get_mask_info(root_config, cpv, pkgsettings,
7102 db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
7104 metadata = dict(zip(db_keys,
7105 db.aux_get(cpv, db_keys, myrepo=myrepo)))
# Missing/corrupt metadata is reported as a single "corruption" reason.
7109 if metadata is None:
7110 mreasons = ["corruption"]
# Unsupported EAPIs get a synthetic mask reason instead of a full
# masking-status calculation.
7112 eapi = metadata['EAPI']
7113 if not portage.eapi_is_supported(eapi):
7114 mreasons = ['EAPI %s' % eapi]
7116 pkg = Package(type_name=pkg_type, root_config=root_config,
7117 cpv=cpv, built=built, installed=installed, metadata=metadata)
# Prefer the calculated USE (with autounmask changes) when a
# _pkg_use_enabled callback is supplied.
7120 if _pkg_use_enabled is not None:
7121 modified_use = _pkg_use_enabled(pkg)
7123 mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
7125 return metadata, mreasons
# Print each masked package with its mask reasons, any package.mask
# comment, and the locations of missing licenses.  Returns True if any
# package was masked because of an unsupported EAPI.
# NOTE(review): this listing has elided lines (e.g. the shown_cpvs
# initialization around 7132, the continue statements after
# duplicate/shown checks, and parts of the writemsg argument lists) —
# confirm against the full source before editing.
7127 def show_masked_packages(masked_packages):
7128 shown_licenses = set()
7129 shown_comments = set()
7130 # Maybe there is both an ebuild and a binary. Only
7131 # show one of them to avoid redundant appearance.
7133 have_eapi_mask = False
7134 for (root_config, pkgsettings, cpv, repo,
7135 metadata, mreasons) in masked_packages:
7138 output_cpv += _repo_separator + repo
7139 if output_cpv in shown_cpvs:
7141 shown_cpvs.add(output_cpv)
7142 eapi_masked = metadata is not None and \
7143 not portage.eapi_is_supported(metadata["EAPI"])
7145 have_eapi_mask = True
7146 # When masked by EAPI, metadata is mostly useless since
7147 # it doesn't contain essential things like SLOT.
7149 comment, filename = None, None
# Fetch the package.mask comment (and its file) when that is among the
# mask reasons.
7150 if not eapi_masked and \
7151 "package.mask" in mreasons:
7152 comment, filename = \
7153 portage.getmaskingreason(
7154 cpv, metadata=metadata,
7155 settings=pkgsettings,
7156 portdb=root_config.trees["porttree"].dbapi,
7157 return_location=True)
7158 missing_licenses = []
7159 if not eapi_masked and metadata is not None:
7161 missing_licenses = \
7162 pkgsettings._getMissingLicenses(
7164 except portage.exception.InvalidDependString:
7165 # This will have already been reported
7166 # above via mreasons.
7169 writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
# Show each distinct package.mask comment only once.
7172 if comment and comment not in shown_comments:
7173 writemsg(filename + ":\n" + comment + "\n",
7175 shown_comments.add(comment)
7176 portdb = root_config.trees["porttree"].dbapi
# Point the user at a local copy of each missing license, once per
# license.
7177 for l in missing_licenses:
7178 l_path = portdb.findLicensePath(l)
7179 if l in shown_licenses:
7181 msg = ("A copy of the '%s' license" + \
7182 " is located at '%s'.\n\n") % (l, l_path)
7183 writemsg(msg, noiselevel=-1)
7184 shown_licenses.add(l)
7185 return have_eapi_mask
def show_mask_docs():
	"""Point the user at the documentation covering masked packages."""
	for line in (
		"For more information, see the MASKED PACKAGES "
		"section in the emerge\n",
		"man page or refer to the Gentoo Handbook.\n",
	):
		writemsg(line, noiselevel=-1)
def show_blocker_docs_link():
	"""Print a link to the handbook section that explains blocked packages."""
	doc_lines = (
		"\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n",
		"section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
		"http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n",
	)
	for doc_line in doc_lines:
		writemsg(doc_line, noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
	"""Return the human-readable mask reason messages for pkg."""
	messages = []
	for mreason in _get_masking_status(pkg, pkgsettings, root_config,
		myrepo=myrepo, use=use):
		messages.append(mreason.message)
	return messages
7201 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
7202 mreasons = _getmaskingstatus(
7203 pkg, settings=pkgsettings,
7204 portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
7206 if not pkg.installed:
7207 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
7208 mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
7209 pkg.metadata["CHOST"]))
7212 for msgs in pkg.invalid.values():
7215 _MaskReason("invalid", "invalid: %s" % (msg,)))
7217 if not pkg.metadata["SLOT"]:
7219 _MaskReason("invalid", "SLOT: undefined"))