2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
20 from os import path as osp
21 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
24 from portage import digraph
25 from portage.const import NEWS_LIB_PATH
26 from portage.cache.mappings import slot_dict_class
29 import portage.xpak, commands, errno, re, socket, time
30 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
31 nc_len, red, teal, turquoise, \
32 xtermTitleReset, yellow
33 from portage.output import create_color_func
34 good = create_color_func("GOOD")
35 bad = create_color_func("BAD")
39 portage.dep._dep_check_strict = True
42 import portage.exception
43 from portage.cache.cache_errors import CacheError
44 from portage.data import secpass
45 from portage.elog.messages import eerror
46 from portage.util import normalize_path as normpath
47 from portage.util import cmp_sort_key, writemsg, writemsg_level
48 from portage.sets import load_default_config, SETPREFIX
49 from portage.sets.base import InternalPackageSet
51 from itertools import chain, izip
53 from _emerge.SlotObject import SlotObject
54 from _emerge.DepPriority import DepPriority
55 from _emerge.BlockerDepPriority import BlockerDepPriority
56 from _emerge.UnmergeDepPriority import UnmergeDepPriority
57 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
58 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
59 from _emerge.Package import Package
60 from _emerge.Blocker import Blocker
61 from _emerge.BlockerDB import BlockerDB
62 from _emerge.EbuildFetcher import EbuildFetcher
63 from _emerge.EbuildPhase import EbuildPhase
64 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
65 from _emerge.PackageMerge import PackageMerge
66 from _emerge.DependencyArg import DependencyArg
67 from _emerge.AtomArg import AtomArg
68 from _emerge.PackageArg import PackageArg
69 from _emerge.SetArg import SetArg
70 from _emerge.Dependency import Dependency
71 from _emerge.BlockerCache import BlockerCache
72 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
73 from _emerge.RepoDisplay import RepoDisplay
74 from _emerge.UseFlagDisplay import UseFlagDisplay
75 from _emerge.SequentialTaskQueue import SequentialTaskQueue
76 from _emerge.ProgressHandler import ProgressHandler
77 from _emerge.stdout_spinner import stdout_spinner
78 from _emerge.JobStatusDisplay import JobStatusDisplay
79 from _emerge.PollScheduler import PollScheduler
80 from _emerge.search import search
81 from _emerge.visible import visible
82 from _emerge.emergelog import emergelog, _emerge_log_dir
83 from _emerge.userquery import userquery
84 from _emerge.countdown import countdown
85 from _emerge.unmerge import unmerge
86 from _emerge.MergeListItem import MergeListItem
87 from _emerge.MetadataRegen import MetadataRegen
88 from _emerge.RootConfig import RootConfig
89 from _emerge.format_size import format_size
90 from _emerge.PackageCounters import PackageCounters
91 from _emerge.FakeVartree import FakeVartree
92 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
95 "clean", "config", "depclean",
96 "info", "list-sets", "metadata",
97 "prune", "regen", "search",
98 "sync", "unmerge", "version",
101 "--ask", "--alphabetical",
102 "--buildpkg", "--buildpkgonly",
103 "--changelog", "--columns",
108 "--fetchonly", "--fetch-all-uri",
109 "--getbinpkg", "--getbinpkgonly",
110 "--help", "--ignore-default-opts",
114 "--nodeps", "--noreplace",
115 "--nospinner", "--oneshot",
116 "--onlydeps", "--pretend",
117 "--quiet", "--resume",
118 "--searchdesc", "--selective",
122 "--usepkg", "--usepkgonly",
129 "b":"--buildpkg", "B":"--buildpkgonly",
130 "c":"--clean", "C":"--unmerge",
131 "d":"--debug", "D":"--deep",
133 "f":"--fetchonly", "F":"--fetch-all-uri",
134 "g":"--getbinpkg", "G":"--getbinpkgonly",
136 "k":"--usepkg", "K":"--usepkgonly",
138 "n":"--noreplace", "N":"--newuse",
139 "o":"--onlydeps", "O":"--nodeps",
140 "p":"--pretend", "P":"--prune",
142 "s":"--search", "S":"--searchdesc",
145 "v":"--verbose", "V":"--version"
def getgccversion(chost):
    """
    Return the version of the currently active gcc.

    @param chost: CHOST string used to find the target-prefixed
        compiler (e.g. "x86_64-pc-linux-gnu")
    @return: a string of the form "gcc-<version>", or "[unavailable]"
        if no working gcc could be located
    """
    gcc_ver_command = 'gcc -dumpversion'
    gcc_ver_prefix = 'gcc-'

    gcc_not_found_error = red(
    "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
    "!!! to update the environment of this terminal and possibly\n" +
    "!!! other terminals also.\n"
    )

    # First choice: gcc-config, which knows the active profile.
    mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
    if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
        return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

    # Second choice: ask the CHOST-prefixed compiler directly.
    mystatus, myoutput = commands.getstatusoutput(
        chost + "-" + gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    # Last resort: an unprefixed gcc in PATH.
    mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    portage.writemsg(gcc_not_found_error, noiselevel=-1)
    return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
    """
    Build the version banner used by "emerge --version" / "emerge --info".

    Combines the portage version with the active profile (relative to
    $PORTDIR/profiles when possible), the gcc version, the installed
    libc version(s) and the kernel release/machine.

    @param portdir: path to the portage tree
    @param target_root: ROOT setting (unused here, kept for interface)
    @param profile: path (usually a symlink) to the active profile
    @param chost: CHOST value, passed through to getgccversion()
    @param vardb: installed-packages dbapi used to look up virtual/libc
    @return: a single descriptive string
    """
    profilever = "unavailable"
    if profile:
        realpath = os.path.realpath(profile)
        basepath = os.path.realpath(os.path.join(portdir, "profiles"))
        if realpath.startswith(basepath):
            # Profile lives inside $PORTDIR/profiles: show it relative.
            profilever = realpath[1 + len(basepath):]
        else:
            try:
                # Out-of-tree profile: show the raw symlink target,
                # marked with a leading "!".
                profilever = "!" + os.readlink(profile)
            except (OSError,):
                pass
        del realpath, basepath

    # Collect the versions of every installed libc provider,
    # comma-separated when there is more than one.
    libcver = ""
    libclist = vardb.match("virtual/libc")
    libclist += vardb.match("virtual/glibc")
    libclist = portage.util.unique_array(libclist)
    for x in libclist:
        xs = portage.catpkgsplit(x)
        if libcver:
            libcver += "," + "-".join(xs[1:])
        else:
            libcver = "-".join(xs[1:])
    if not libcver:
        libcver = "unavailable"

    gccver = getgccversion(chost)
    unameout = platform.release() + " " + platform.machine()

    return "Portage " + portage.VERSION + " (" + profilever + ", " + \
        gccver + ", " + libcver + ", " + unameout + ")"
def create_depgraph_params(myopts, myaction):
    """
    Translate emerge command-line options into depgraph engine parameters.

    Recognized parameters:
      self:      include _this_ package regardless of if it is merged.
      selective: exclude the package if it is merged
      recurse:   go into the dependencies
      deep:      go into the dependencies of already merged packages
      empty:     pretend nothing is merged
      complete:  completely account for all known dependencies
      remove:    build graph for use in removing packages

    @param myopts: dict-like mapping of emerge options (presence-tested)
    @param myaction: the emerge action string (e.g. "remove")
    @return: a set of parameter name strings
    """
    myparams = set(["recurse"])

    # Package removal needs a complete graph, and nothing else applies.
    if myaction == "remove":
        myparams.add("remove")
        myparams.add("complete")
        return myparams

    if "--update" in myopts or \
        "--newuse" in myopts or \
        "--reinstall" in myopts or \
        "--noreplace" in myopts:
        myparams.add("selective")
    if "--emptytree" in myopts:
        # "empty" supersedes "selective": everything is rebuilt anyway.
        myparams.add("empty")
        myparams.discard("selective")
    if "--nodeps" in myopts:
        myparams.discard("recurse")
    if "--deep" in myopts:
        myparams.add("deep")
    if "--complete-graph" in myopts:
        myparams.add("complete")
    return myparams
def create_world_atom(pkg, args_set, root_config):
    """Create a new atom for the world file if one does not exist.  If the
    argument atom is precise enough to identify a specific slot then a slot
    atom will be returned. Atoms that are in the system set may also be stored
    in world since system atoms can only match one slot while world atoms can
    be greedy with respect to slots.  Unslotted system packages will not be
    stored in world.

    @return: the atom string to add to world, or None if nothing
        should be added
    """
    arg_atom = args_set.findAtomForPackage(pkg)
    if not arg_atom:
        return None
    cp = portage.dep_getkey(arg_atom)
    new_world_atom = cp
    sets = root_config.sets
    portdb = root_config.trees["porttree"].dbapi
    vardb = root_config.trees["vartree"].dbapi
    available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
        for cpv in portdb.match(cp))
    slotted = len(available_slots) > 1 or \
        (len(available_slots) == 1 and "0" not in available_slots)
    if not slotted:
        # check the vdb in case this is multislot
        available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
            for cpv in vardb.match(cp))
        slotted = len(available_slots) > 1 or \
            (len(available_slots) == 1 and "0" not in available_slots)
    if slotted and arg_atom != cp:
        # If the user gave a specific atom, store it as a
        # slot atom in the world file.
        slot_atom = pkg.slot_atom

        # For USE=multislot, there are a couple of cases to
        # handle here:
        #
        # 1) SLOT="0", but the real SLOT spontaneously changed to some
        #    unknown value, so just record an unslotted atom.
        #
        # 2) SLOT comes from an installed package and there is no
        #    matching SLOT in the portage tree.
        #
        # Make sure that the slot atom is available in either the
        # portdb or the vardb, since otherwise the user certainly
        # doesn't want the SLOT atom recorded in the world file
        # (case 1 above).  If it's only available in the vardb,
        # the user may be trying to prevent a USE=multislot
        # package from being removed by --depclean (case 2 above).

        mydb = portdb
        if not portdb.match(slot_atom):
            # SLOT seems to come from an installed multislot package
            mydb = vardb
        # If there is no installed package matching the SLOT atom,
        # it probably changed SLOT spontaneously due to USE=multislot,
        # so just record an unslotted atom.
        if vardb.match(slot_atom):
            # Now verify that the argument is precise
            # enough to identify a specific slot.
            matches = mydb.match(arg_atom)
            matched_slots = set()
            for cpv in matches:
                matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
            if len(matched_slots) == 1:
                new_world_atom = slot_atom

    if new_world_atom == sets["world"].findAtomForPackage(pkg):
        # Both atoms would be identical, so there's nothing to add.
        return None
    if not slotted:
        # Unlike world atoms, system atoms are not greedy for slots, so they
        # can't be safely excluded from world if they are slotted.
        system_atom = sets["system"].findAtomForPackage(pkg)
        if system_atom:
            if not portage.dep_getkey(system_atom).startswith("virtual/"):
                return None
            # System virtuals aren't safe to exclude from world since they can
            # match multiple old-style virtuals but only one of them will be
            # pulled in by update or depclean.
            providers = portdb.mysettings.getvirtuals().get(
                portage.dep_getkey(system_atom))
            if providers and len(providers) == 1 and providers[0] == cp:
                return None
    return new_world_atom
def filter_iuse_defaults(iuse):
    """
    Generator that yields IUSE tokens with any leading "+"/"-"
    default-enable/disable markers stripped.

    @param iuse: iterable of IUSE flag strings
    """
    for flag in iuse:
        if flag.startswith("+") or flag.startswith("-"):
            yield flag[1:]
        else:
            yield flag
334 def _find_deep_system_runtime_deps(graph):
335 deep_system_deps = set()
338 if not isinstance(node, Package) or \
339 node.operation == 'uninstall':
341 if node.root_config.sets['system'].findAtomForPackage(node):
342 node_stack.append(node)
344 def ignore_priority(priority):
346 Ignore non-runtime priorities.
348 if isinstance(priority, DepPriority) and \
349 (priority.runtime or priority.runtime_post):
354 node = node_stack.pop()
355 if node in deep_system_deps:
357 deep_system_deps.add(node)
358 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
359 if not isinstance(child, Package) or \
360 child.operation == 'uninstall':
362 node_stack.append(child)
364 return deep_system_deps
def get_masking_status(pkg, pkgsettings, root_config):
    """
    Return the list of reasons a package is masked.

    Delegates to portage.getmaskingstatus() and appends two extra
    checks: a CHOST mismatch for uninstalled packages, and a missing
    SLOT value.

    @return: list of human-readable mask-reason strings (may be empty)
    """
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)

    if not pkg.installed:
        # CHOST only matters for packages that would be newly built.
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])

    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")

    return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Fetch metadata for cpv from the given db and compute its mask reasons.

    @return: (metadata, mreasons) where metadata is a dict of the
        requested db_keys (or None if the lookup failed) and mreasons
        is a list of mask-reason strings
    """
    try:
        metadata = dict(izip(db_keys,
            db.aux_get(cpv, db_keys)))
    except KeyError:
        # cpv has vanished from the db or its cache entry is broken.
        metadata = None
    if metadata and not built:
        # For ebuilds, USE/CHOST come from config, not from the db.
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')
    if metadata is None:
        mreasons = ["corruption"]
    else:
        eapi = metadata['EAPI']
        if eapi[:1] == '-':
            # NOTE(review): leading "-" marks an unsupported EAPI in
            # the cache format — strip it before reporting; confirm
            # against the cache module.
            eapi = eapi[1:]
        if not portage.eapi_is_supported(eapi):
            mreasons = ['EAPI %s' % eapi]
        else:
            pkg = Package(type_name=pkg_type, root_config=root_config,
                cpv=cpv, built=built, installed=installed, metadata=metadata)
            mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
408 def show_masked_packages(masked_packages):
409 shown_licenses = set()
410 shown_comments = set()
411 # Maybe there is both an ebuild and a binary. Only
412 # show one of them to avoid redundant appearance.
414 have_eapi_mask = False
415 for (root_config, pkgsettings, cpv,
416 metadata, mreasons) in masked_packages:
417 if cpv in shown_cpvs:
420 comment, filename = None, None
421 if "package.mask" in mreasons:
422 comment, filename = \
423 portage.getmaskingreason(
424 cpv, metadata=metadata,
425 settings=pkgsettings,
426 portdb=root_config.trees["porttree"].dbapi,
427 return_location=True)
428 missing_licenses = []
430 if not portage.eapi_is_supported(metadata["EAPI"]):
431 have_eapi_mask = True
434 pkgsettings._getMissingLicenses(
436 except portage.exception.InvalidDependString:
437 # This will have already been reported
438 # above via mreasons.
441 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
442 if comment and comment not in shown_comments:
445 shown_comments.add(comment)
446 portdb = root_config.trees["porttree"].dbapi
447 for l in missing_licenses:
448 l_path = portdb.findLicensePath(l)
449 if l in shown_licenses:
451 msg = ("A copy of the '%s' license" + \
452 " is located at '%s'.") % (l, l_path)
455 shown_licenses.add(l)
456 return have_eapi_mask
# Dependency graph builder/resolver for emerge.  NOTE(review): this
# class continues past the end of this chunk; only header and class
# attributes are visible here.
458 class depgraph(object):
# Maps package type names to their tree names (shared with RootConfig).
460 pkg_tree_map = RootConfig.pkg_tree_map
# Metadata keys holding dependency strings, in evaluation order.
462 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Construct the depgraph state: per-root fake vartrees, filtered db
# views for package selection, and the many bookkeeping digraphs/sets
# used during resolution.  NOTE(review): this block is a line-numbered
# dump with interior lines elided (loop headers such as the per-root
# iteration are missing); code is kept byte-identical below.
464 def __init__(self, settings, trees, myopts, myparams, spinner):
465 self.settings = settings
466 self.target_root = settings["ROOT"]
468 self.myparams = myparams
470 if settings.get("PORTAGE_DEBUG", "") == "1":
472 self.spinner = spinner
473 self._running_root = trees["/"]["root_config"]
474 self._opts_no_restart = Scheduler._opts_no_restart
475 self.pkgsettings = {}
476 # Maps slot atom to package for each Package added to the graph.
477 self._slot_pkg_map = {}
478 # Maps nodes to the reasons they were selected for reinstallation.
479 self._reinstall_nodes = {}
482 self._trees_orig = trees
484 # Contains a filtered view of preferred packages that are selected
485 # from available repositories.
486 self._filtered_trees = {}
487 # Contains installed packages and new packages that have been added
489 self._graph_trees = {}
490 # All Package instances
# Per-root setup (enclosing loop header elided in this dump —
# presumably "for myroot in trees:"; TODO confirm).
493 self.trees[myroot] = {}
494 # Create a RootConfig instance that references
495 # the FakeVartree instead of the real one.
496 self.roots[myroot] = RootConfig(
497 trees[myroot]["vartree"].settings,
499 trees[myroot]["root_config"].setconfig)
500 for tree in ("porttree", "bintree"):
501 self.trees[myroot][tree] = trees[myroot][tree]
502 self.trees[myroot]["vartree"] = \
503 FakeVartree(trees[myroot]["root_config"],
504 pkg_cache=self._pkg_cache)
505 self.pkgsettings[myroot] = portage.config(
506 clone=self.trees[myroot]["vartree"].settings)
507 self._slot_pkg_map[myroot] = {}
508 vardb = self.trees[myroot]["vartree"].dbapi
509 preload_installed_pkgs = "--nodeps" not in self.myopts and \
510 "--buildpkgonly" not in self.myopts
511 # This fakedbapi instance will model the state that the vdb will
512 # have after new packages have been installed.
513 fakedb = PackageVirtualDbapi(vardb.settings)
514 if preload_installed_pkgs:
516 self.spinner.update()
517 # This triggers metadata updates via FakeVartree.
518 vardb.aux_get(pkg.cpv, [])
519 fakedb.cpv_inject(pkg)
521 # Now that the vardb state is cached in our FakeVartree,
522 # we won't be needing the real vartree cache for awhile.
523 # To make some room on the heap, clear the vardbapi
525 trees[myroot]["vartree"].dbapi._clear_cache()
528 self.mydbapi[myroot] = fakedb
531 graph_tree.dbapi = fakedb
532 self._graph_trees[myroot] = {}
533 self._filtered_trees[myroot] = {}
534 # Substitute the graph tree for the vartree in dep_check() since we
535 # want atom selections to be consistent with package selections
536 # have already been made.
537 self._graph_trees[myroot]["porttree"] = graph_tree
538 self._graph_trees[myroot]["vartree"] = graph_tree
541 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
542 self._filtered_trees[myroot]["porttree"] = filtered_tree
544 # Passing in graph_tree as the vartree here could lead to better
545 # atom selections in some cases by causing atoms for packages that
546 # have been added to the graph to be preferred over other choices.
547 # However, it can trigger atom selections that result in
548 # unresolvable direct circular dependencies. For example, this
549 # happens with gwydion-dylan which depends on either itself or
550 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
551 # gwydion-dylan-bin needs to be selected in order to avoid a
552 # an unresolvable direct circular dependency.
554 # To solve the problem described above, pass in "graph_db" so that
555 # packages that have been added to the graph are distinguishable
556 # from other available packages and installed packages. Also, pass
557 # the parent package into self._select_atoms() calls so that
558 # unresolvable direct circular dependencies can be detected and
559 # avoided when possible.
560 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
561 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
564 portdb = self.trees[myroot]["porttree"].dbapi
565 bindb = self.trees[myroot]["bintree"].dbapi
566 vardb = self.trees[myroot]["vartree"].dbapi
567 # (db, pkg_type, built, installed, db_keys)
568 if "--usepkgonly" not in self.myopts:
569 db_keys = list(portdb._aux_cache_keys)
570 dbs.append((portdb, "ebuild", False, False, db_keys))
571 if "--usepkg" in self.myopts:
572 db_keys = list(bindb._aux_cache_keys)
573 dbs.append((bindb, "binary", True, False, db_keys))
574 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
575 dbs.append((vardb, "installed", True, True, db_keys))
576 self._filtered_trees[myroot]["dbs"] = dbs
577 if "--usepkg" in self.myopts:
578 self.trees[myroot]["bintree"].populate(
579 "--getbinpkg" in self.myopts,
580 "--getbinpkgonly" in self.myopts)
583 self.digraph=portage.digraph()
584 # contains all sets added to the graph
586 # contains atoms given as arguments
587 self._sets["args"] = InternalPackageSet()
588 # contains all atoms from all sets added to the graph, including
589 # atoms given as arguments
590 self._set_atoms = InternalPackageSet()
591 self._atom_arg_map = {}
592 # contains all nodes pulled in by self._set_atoms
593 self._set_nodes = set()
594 # Contains only Blocker -> Uninstall edges
595 self._blocker_uninstalls = digraph()
596 # Contains only Package -> Blocker edges
597 self._blocker_parents = digraph()
598 # Contains only irrelevant Package -> Blocker edges
599 self._irrelevant_blockers = digraph()
600 # Contains only unsolvable Package -> Blocker edges
601 self._unsolvable_blockers = digraph()
602 # Contains all Blocker -> Blocked Package edges
603 self._blocked_pkgs = digraph()
604 # Contains world packages that have been protected from
605 # uninstallation but may not have been added to the graph
606 # if the graph is not complete yet.
607 self._blocked_world_pkgs = {}
608 self._slot_collision_info = {}
609 # Slot collision nodes are not allowed to block other packages since
610 # blocker validation is only able to account for one package per slot.
611 self._slot_collision_nodes = set()
612 self._parent_atoms = {}
613 self._slot_conflict_parent_atoms = set()
614 self._serialized_tasks_cache = None
615 self._scheduler_graph = None
616 self._displayed_list = None
617 self._pprovided_args = []
618 self._missing_args = []
619 self._masked_installed = set()
620 self._unsatisfied_deps_for_display = []
621 self._unsatisfied_blockers_for_display = None
622 self._circular_deps_for_display = None
624 self._dep_disjunctive_stack = []
625 self._unsatisfied_deps = []
626 self._initially_unsatisfied_deps = []
627 self._ignored_deps = []
628 self._required_set_names = set(["system", "world"])
# Method aliases: the "highest available" strategies are the defaults.
629 self._select_atoms = self._select_atoms_highest_available
630 self._select_package = self._select_pkg_highest_available
631 self._highest_pkg_cache = {}
# Render the slot-conflict report to stderr: for each colliding slot,
# the conflicting package instances and a pruned list of the parents
# that pulled each one in, plus an explanation when one can be
# generated.  NOTE(review): line-numbered dump with interior lines
# elided; code kept byte-identical below.
633 def _show_slot_collision_notice(self):
634 """Show an informational message advising the user to mask one of
635 the packages. In some cases it may be possible to resolve this
636 automatically, but support for backtracking (removal nodes that have
637 already been selected) will be required in order to handle all possible
641 if not self._slot_collision_info:
644 self._show_merge_list()
647 msg.append("\n!!! Multiple package instances within a single " + \
648 "package slot have been pulled\n")
649 msg.append("!!! into the dependency graph, resulting" + \
650 " in a slot conflict:\n\n")
652 # Max number of parents shown, to avoid flooding the display.
654 explanation_columns = 70
656 for (slot_atom, root), slot_nodes \
657 in self._slot_collision_info.iteritems():
658 msg.append(str(slot_atom))
661 for node in slot_nodes:
663 msg.append(str(node))
664 parent_atoms = self._parent_atoms.get(node)
667 # Prefer conflict atoms over others.
668 for parent_atom in parent_atoms:
669 if len(pruned_list) >= max_parents:
671 if parent_atom in self._slot_conflict_parent_atoms:
672 pruned_list.add(parent_atom)
674 # If this package was pulled in by conflict atoms then
675 # show those alone since those are the most interesting.
677 # When generating the pruned list, prefer instances
678 # of DependencyArg over instances of Package.
679 for parent_atom in parent_atoms:
680 if len(pruned_list) >= max_parents:
682 parent, atom = parent_atom
683 if isinstance(parent, DependencyArg):
684 pruned_list.add(parent_atom)
685 # Prefer Packages instances that themselves have been
686 # pulled into collision slots.
687 for parent_atom in parent_atoms:
688 if len(pruned_list) >= max_parents:
690 parent, atom = parent_atom
691 if isinstance(parent, Package) and \
692 (parent.slot_atom, parent.root) \
693 in self._slot_collision_info:
694 pruned_list.add(parent_atom)
695 for parent_atom in parent_atoms:
696 if len(pruned_list) >= max_parents:
698 pruned_list.add(parent_atom)
699 omitted_parents = len(parent_atoms) - len(pruned_list)
700 parent_atoms = pruned_list
701 msg.append(" pulled in by\n")
702 for parent_atom in parent_atoms:
703 parent, atom = parent_atom
705 if isinstance(parent,
706 (PackageArg, AtomArg)):
707 # For PackageArg and AtomArg types, it's
708 # redundant to display the atom attribute.
709 msg.append(str(parent))
711 # Display the specific atom from SetArg or
713 msg.append("%s required by %s" % (atom, parent))
717 msg.append("(and %d more)\n" % omitted_parents)
719 msg.append(" (no parents)\n")
721 explanation = self._slot_conflict_explanation(slot_nodes)
724 msg.append(indent + "Explanation:\n\n")
725 for line in textwrap.wrap(explanation, explanation_columns):
726 msg.append(2*indent + line + "\n")
729 sys.stderr.write("".join(msg))
732 explanations_for_all = explanations == len(self._slot_collision_info)
734 if explanations_for_all or "--quiet" in self.myopts:
# Fall-through advice when no per-conflict explanation was produced.
738 msg.append("It may be possible to solve this problem ")
739 msg.append("by using package.mask to prevent one of ")
740 msg.append("those packages from being selected. ")
741 msg.append("However, it is also possible that conflicting ")
742 msg.append("dependencies exist such that they are impossible to ")
743 msg.append("satisfy simultaneously. If such a conflict exists in ")
744 msg.append("the dependencies of two different packages, then those ")
745 msg.append("packages can not be installed simultaneously.")
747 from formatter import AbstractFormatter, DumbWriter
748 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
750 f.add_flowing_data(x)
754 msg.append("For more information, see MASKED PACKAGES ")
755 msg.append("section in the emerge man page or refer ")
756 msg.append("to the Gentoo Handbook.")
758 f.add_flowing_data(x)
# Produce a human-readable suggestion for a two-package USE-dep slot
# conflict, or None when no suggestion applies.  NOTE(review):
# line-numbered dump with several guard/return lines elided; code
# kept byte-identical below.
762 def _slot_conflict_explanation(self, slot_nodes):
764 When a slot conflict occurs due to USE deps, there are a few
765 different cases to consider:
767 1) New USE are correctly set but --newuse wasn't requested so an
768 installed package with incorrect USE happened to get pulled
769 into graph before the new one.
771 2) New USE are incorrectly set but an installed package has correct
772 USE so it got pulled into the graph, and a new instance also got
773 pulled in due to --newuse or an upgrade.
775 3) Multiple USE deps exist that can't be satisfied simultaneously,
776 and multiple package instances got pulled into the same slot to
777 satisfy the conflicting deps.
779 Currently, explanations and suggested courses of action are generated
780 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
783 if len(slot_nodes) != 2:
784 # Suggestions are only implemented for
785 # conflicts between two packages.
788 all_conflict_atoms = self._slot_conflict_parent_atoms
# Classify the two nodes: one matched by conflict atoms, one not.
791 unmatched_node = None
792 for node in slot_nodes:
793 parent_atoms = self._parent_atoms.get(node)
795 # Normally, there are always parent atoms. If there are
796 # none then something unexpected is happening and there's
797 # currently no suggestion for this case.
799 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
800 for parent_atom in conflict_atoms:
801 parent, atom = parent_atom
803 # Suggestions are currently only implemented for cases
804 # in which all conflict atoms have USE deps.
807 if matched_node is not None:
808 # If conflict atoms match multiple nodes
809 # then there's no suggestion.
812 matched_atoms = conflict_atoms
814 if unmatched_node is not None:
815 # Neither node is matched by conflict atoms, and
816 # there is no suggestion for this case.
818 unmatched_node = node
820 if matched_node is None or unmatched_node is None:
821 # This shouldn't happen.
824 if unmatched_node.installed and not matched_node.installed and \
825 unmatched_node.cpv == matched_node.cpv:
826 # If the conflicting packages are the same version then
827 # --newuse should be all that's needed. If they are different
828 # versions then there's some other problem.
829 return "New USE are correctly set, but --newuse wasn't" + \
830 " requested, so an installed package with incorrect USE " + \
831 "happened to get pulled into the dependency graph. " + \
832 "In order to solve " + \
833 "this, either specify the --newuse option or explicitly " + \
834 " reinstall '%s'." % matched_node.slot_atom
836 if matched_node.installed and not unmatched_node.installed:
837 atoms = sorted(set(atom for parent, atom in matched_atoms))
838 explanation = ("New USE for '%s' are incorrectly set. " + \
839 "In order to solve this, adjust USE to satisfy '%s'") % \
840 (matched_node.slot_atom, atoms[0])
842 for atom in atoms[1:-1]:
843 explanation += ", '%s'" % (atom,)
846 explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.

    Populates self._slot_conflict_parent_atoms with the parent atoms
    that fail to match at least one package in their slot, and shares
    each conflicting slot's full parent-atom pool across its packages.
    """
    for (slot_atom, root), slot_nodes \
        in self._slot_collision_info.iteritems():

        # Pool the parent atoms of every package in this slot.
        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
                continue
            all_parent_atoms.update(parent_atoms)

        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if parent_atoms is None:
                parent_atoms = set()
                self._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                    continue
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,))
                if atom_set.findAtomForPackage(pkg):
                    parent_atoms.add(parent_atom)
                else:
                    # This atom excludes pkg, so it is part of the conflict.
                    self._slot_conflict_parent_atoms.add(parent_atom)
886 def _reinstall_for_flags(self, forced_flags,
887 orig_use, orig_iuse, cur_use, cur_iuse):
888 """Return a set of flags that trigger reinstallation, or None if there
889 are no such flags."""
890 if "--newuse" in self.myopts:
891 flags = set(orig_iuse.symmetric_difference(
892 cur_iuse).difference(forced_flags))
893 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
894 cur_iuse.intersection(cur_use)))
897 elif "changed-use" == self.myopts.get("--reinstall"):
898 flags = orig_iuse.intersection(orig_use).symmetric_difference(
899 cur_iuse.intersection(cur_use))
904 def _create_graph(self, allow_unsatisfied=False):
905 dep_stack = self._dep_stack
906 dep_disjunctive_stack = self._dep_disjunctive_stack
907 while dep_stack or dep_disjunctive_stack:
908 self.spinner.update()
910 dep = dep_stack.pop()
911 if isinstance(dep, Package):
912 if not self._add_pkg_deps(dep,
913 allow_unsatisfied=allow_unsatisfied):
916 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
918 if dep_disjunctive_stack:
919 if not self._pop_disjunction(allow_unsatisfied):
# Resolve a single Dependency: blockers become Blocker nodes, ordinary
# atoms are resolved to a package via self._select_package() and handed
# to self._add_pkg().  NOTE(review): line-numbered dump with elided
# returns/conditionals; code kept byte-identical below.
923 def _add_dep(self, dep, allow_unsatisfied=False):
924 debug = "--debug" in self.myopts
925 buildpkgonly = "--buildpkgonly" in self.myopts
926 nodeps = "--nodeps" in self.myopts
927 empty = "empty" in self.myparams
928 deep = "deep" in self.myparams
929 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (the dep.blocker test itself is elided in this dump).
931 if not buildpkgonly and \
933 dep.parent not in self._slot_collision_nodes:
934 if dep.parent.onlydeps:
935 # It's safe to ignore blockers if the
936 # parent is an --onlydeps node.
938 # The blocker applies to the root where
939 # the parent is or will be installed.
940 blocker = Blocker(atom=dep.atom,
941 eapi=dep.parent.metadata["EAPI"],
942 root=dep.parent.root)
943 self._blocker_parents.add(blocker, dep.parent)
945 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
946 onlydeps=dep.onlydeps)
# No package satisfied the atom.
948 if dep.priority.optional:
949 # This could be an unnecessary build-time dep
950 # pulled in by --with-bdeps=y.
952 if allow_unsatisfied:
953 self._unsatisfied_deps.append(dep)
955 self._unsatisfied_deps_for_display.append(
956 ((dep.root, dep.atom), {"myparent":dep.parent}))
958 # In some cases, dep_check will return deps that shouldn't
959 # be processed any further, so they are identified and
960 # discarded here. Try to discard as few as possible since
961 # discarded dependencies reduce the amount of information
962 # available for optimization of merge order.
963 if dep.priority.satisfied and \
964 not dep_pkg.installed and \
965 not (existing_node or empty or deep or update):
967 if dep.root == self.target_root:
969 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
970 except StopIteration:
972 except portage.exception.InvalidDependString:
973 if not dep_pkg.installed:
974 # This shouldn't happen since the package
975 # should have been masked.
978 self._ignored_deps.append(dep)
981 if not self._add_pkg(dep_pkg, dep):
# Register 'pkg' in the dependency graph, wiring it to dep.parent and to any
# matching command-line arguments, and detect/record slot collisions with an
# existing node occupying the same slot.
# NOTE(review): this excerpt is a numbered listing with gaps in the embedded
# numbering -- several original lines (try/except scaffolding, returns,
# docstring delimiters) are absent here; visible lines kept byte-identical.
985 def _add_pkg(self, pkg, dep):
992 myparent = dep.parent
993 priority = dep.priority
996 priority = DepPriority()
998 Fills the digraph with nodes comprised of packages to merge.
999 mybigkey is the package spec of the package to merge.
1000 myparent is the package depending on mybigkey ( or None )
1001 addme = Should we add this package to the digraph or are we just looking at it's deps?
1002 Think --onlydeps, we need to ignore packages in that case.
1005 #IUSE-aware emerge -> USE DEP aware depgraph
1006 #"no downgrade" emerge
1008 # Ensure that the dependencies of the same package
1009 # are never processed more than once.
1010 previously_added = pkg in self.digraph
1012 # select the correct /var database that we'll be checking against
1013 vardbapi = self.trees[pkg.root]["vartree"].dbapi
1014 pkgsettings = self.pkgsettings[pkg.root]
# Atoms from --world/--set/command-line args that match this package.
1019 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1020 except portage.exception.InvalidDependString, e:
1021 if not pkg.installed:
1022 show_invalid_depstring_notice(
1023 pkg, pkg.metadata["PROVIDE"], str(e))
1027 if not pkg.onlydeps:
1028 if not pkg.installed and \
1029 "empty" not in self.myparams and \
1030 vardbapi.match(pkg.slot_atom):
1031 # Increase the priority of dependencies on packages that
1032 # are being rebuilt. This optimizes merge order so that
1033 # dependencies are rebuilt/updated as soon as possible,
1034 # which is needed especially when emerge is called by
1035 # revdep-rebuild since dependencies may be affected by ABI
1036 # breakage that has rendered them useless. Don't adjust
1037 # priority here when in "empty" mode since all packages
1038 # are being merged in that case.
1039 priority.rebuild = True
1041 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
1042 slot_collision = False
1044 existing_node_matches = pkg.cpv == existing_node.cpv
1045 if existing_node_matches and \
1046 pkg != existing_node and \
1047 dep.atom is not None:
1048 # Use package set for matching since it will match via
1049 # PROVIDE when necessary, while match_from_list does not.
1050 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
1051 if not atom_set.findAtomForPackage(existing_node):
1052 existing_node_matches = False
1053 if existing_node_matches:
1054 # The existing node can be reused.
1056 for parent_atom in arg_atoms:
1057 parent, atom = parent_atom
1058 self.digraph.add(existing_node, parent,
1060 self._add_parent_atom(existing_node, parent_atom)
1061 # If a direct circular dependency is not an unsatisfied
1062 # buildtime dependency then drop it here since otherwise
1063 # it can skew the merge order calculation in an unwanted
1065 if existing_node != myparent or \
1066 (priority.buildtime and not priority.satisfied):
1067 self.digraph.addnode(existing_node, myparent,
1069 if dep.atom is not None and dep.parent is not None:
1070 self._add_parent_atom(existing_node,
1071 (dep.parent, dep.atom))
1075 # A slot collision has occurred. Sometimes this coincides
1076 # with unresolvable blockers, so the slot collision will be
1077 # shown later if there are no unresolvable blockers.
1078 self._add_slot_conflict(pkg)
1079 slot_collision = True
1082 # Now add this node to the graph so that self.display()
1083 # can show use flags and --tree portage.output. This node is
1084 # only being partially added to the graph. It must not be
1085 # allowed to interfere with the other nodes that have been
1086 # added. Do not overwrite data for existing nodes in
1087 # self.mydbapi since that data will be used for blocker
1089 # Even though the graph is now invalid, continue to process
1090 # dependencies so that things like --fetchonly can still
1091 # function despite collisions.
1093 elif not previously_added:
1094 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1095 self.mydbapi[pkg.root].cpv_inject(pkg)
1096 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1098 if not pkg.installed:
1099 # Allow this package to satisfy old-style virtuals in case it
1100 # doesn't already. Any pre-existing providers will be preferred
1103 pkgsettings.setinst(pkg.cpv, pkg.metadata)
1104 # For consistency, also update the global virtuals.
1105 settings = self.roots[pkg.root].settings
1107 settings.setinst(pkg.cpv, pkg.metadata)
1109 except portage.exception.InvalidDependString, e:
1110 show_invalid_depstring_notice(
1111 pkg, pkg.metadata["PROVIDE"], str(e))
1116 self._set_nodes.add(pkg)
1118 # Do this even when addme is False (--onlydeps) so that the
1119 # parent/child relationship is always known in case
1120 # self._show_slot_collision_notice() needs to be called later.
1121 self.digraph.add(pkg, myparent, priority=priority)
1122 if dep.atom is not None and dep.parent is not None:
1123 self._add_parent_atom(pkg, (dep.parent, dep.atom))
1126 for parent_atom in arg_atoms:
1127 parent, atom = parent_atom
1128 self.digraph.add(pkg, parent, priority=priority)
1129 self._add_parent_atom(pkg, parent_atom)
1131 """ This section determines whether we go deeper into dependencies or not.
1132 We want to go deeper on a few occasions:
1133 Installing package A, we need to make sure package A's deps are met.
1134 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1135 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1137 dep_stack = self._dep_stack
1138 if "recurse" not in self.myparams:
1140 elif pkg.installed and \
1141 "deep" not in self.myparams:
# Installed packages' deps are ignored unless --deep is in effect.
1142 dep_stack = self._ignored_deps
1144 self.spinner.update()
1149 if not previously_added:
1150 dep_stack.append(pkg)
def _add_parent_atom(self, pkg, parent_atom):
	"""Record that parent_atom (a (parent, atom) pair) pulled pkg into
	the graph, creating the per-package set on first use."""
	self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
# Record a slot conflict: remember 'pkg' as a colliding node and track all
# packages competing for the same (slot_atom, root) key in
# self._slot_collision_info.
# NOTE(review): original line 1165 (presumably "slot_nodes = set()") is
# missing from this excerpt; visible lines kept byte-identical.
1160 def _add_slot_conflict(self, pkg):
1161 self._slot_collision_nodes.add(pkg)
1162 slot_key = (pkg.slot_atom, pkg.root)
1163 slot_nodes = self._slot_collision_info.get(slot_key)
1164 if slot_nodes is None:
1166 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
1167 self._slot_collision_info[slot_key] = slot_nodes
# Collect pkg's DEPEND/RDEPEND/PDEPEND strings (adjusted for --with-bdeps,
# --buildpkgonly, --root-deps and removal actions), normalize them, and feed
# each one to _add_pkg_dep_string() with an appropriate priority.
# NOTE(review): numbered listing with interior gaps (try blocks, 'deps ='
# tuple header, returns missing); visible lines kept byte-identical.
1170 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1172 mytype = pkg.type_name
1175 metadata = pkg.metadata
1176 myuse = pkg.use.enabled
1178 depth = pkg.depth + 1
1179 removal_action = "remove" in self.myparams
1182 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1184 edepend[k] = metadata[k]
1186 if not pkg.built and \
1187 "--buildpkgonly" in self.myopts and \
1188 "deep" not in self.myparams and \
1189 "empty" not in self.myparams:
# --buildpkgonly without --deep/--emptytree: runtime deps are irrelevant.
1190 edepend["RDEPEND"] = ""
1191 edepend["PDEPEND"] = ""
1192 bdeps_optional = False
1194 if pkg.built and not removal_action:
1195 if self.myopts.get("--with-bdeps", "n") == "y":
1196 # Pull in build time deps as requested, but marked them as
1197 # "optional" since they are not strictly required. This allows
1198 # more freedom in the merge order calculation for solving
1199 # circular dependencies. Don't convert to PDEPEND since that
1200 # could make --with-bdeps=y less effective if it is used to
1201 # adjust merge order to prevent built_with_use() calls from
1203 bdeps_optional = True
1205 # built packages do not have build time dependencies.
1206 edepend["DEPEND"] = ""
1208 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
1209 edepend["DEPEND"] = ""
1212 root_deps = self.myopts.get("--root-deps")
1213 if root_deps is not None:
1214 if root_deps is True:
1216 elif root_deps == "rdeps":
1217 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples; build-time deps may be optional.
1220 (bdeps_root, edepend["DEPEND"],
1221 self._priority(buildtime=(not bdeps_optional),
1222 optional=bdeps_optional)),
1223 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
1224 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
1227 debug = "--debug" in self.myopts
1228 strict = mytype != "installed"
1231 portage.dep._dep_check_strict = False
1233 for dep_root, dep_string, dep_priority in deps:
1238 print "Parent: ", jbigkey
1239 print "Depstring:", dep_string
1240 print "Priority:", dep_priority
1244 dep_string = portage.dep.paren_normalize(
1245 portage.dep.use_reduce(
1246 portage.dep.paren_reduce(dep_string),
1247 uselist=pkg.use.enabled))
1249 dep_string = list(self._queue_disjunctive_deps(
1250 pkg, dep_root, dep_priority, dep_string))
1252 except portage.exception.InvalidDependString, e:
1256 show_invalid_depstring_notice(pkg, dep_string, str(e))
1262 dep_string = portage.dep.paren_enclose(dep_string)
1264 if not self._add_pkg_dep_string(
1265 pkg, dep_root, dep_priority, dep_string,
1269 except portage.exception.AmbiguousPackageName, e:
1271 portage.writemsg("\n\n!!! An atom in the dependencies " + \
1272 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1274 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
1275 portage.writemsg("\n", noiselevel=-1)
1276 if mytype == "binary":
1278 "!!! This binary package cannot be installed: '%s'\n" % \
1279 mykey, noiselevel=-1)
1280 elif mytype == "ebuild":
1281 portdb = self.roots[myroot].trees["porttree"].dbapi
1282 myebuild, mylocation = portdb.findname2(mykey)
1283 portage.writemsg("!!! This ebuild cannot be installed: " + \
1284 "'%s'\n" % myebuild, noiselevel=-1)
1285 portage.writemsg("!!! Please notify the package maintainer " + \
1286 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Restore the module-level strictness flag cleared at line 1231.
1289 portage.dep._dep_check_strict = True
# Resolve one dependency string for pkg: select concrete atoms via
# _select_atoms(), mark those already satisfied by the vardb, and queue a
# Dependency for each selected atom via _add_dep().
# NOTE(review): numbered listing with interior gaps; visible lines kept
# byte-identical.
1292 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1294 depth = pkg.depth + 1
1295 debug = "--debug" in self.myopts
1296 strict = pkg.type_name != "installed"
1300 print "Parent: ", pkg
1301 print "Depstring:", dep_string
1302 print "Priority:", dep_priority
1305 selected_atoms = self._select_atoms(dep_root,
1306 dep_string, myuse=pkg.use.enabled, parent=pkg,
1307 strict=strict, priority=dep_priority)
1308 except portage.exception.InvalidDependString, e:
1309 show_invalid_depstring_notice(pkg, dep_string, str(e))
1316 print "Candidates:", selected_atoms
1318 vardb = self.roots[dep_root].trees["vartree"].dbapi
1320 for atom in selected_atoms:
1323 atom = portage.dep.Atom(atom)
1325 mypriority = dep_priority.copy()
# A non-blocker atom already matched by the installed-package db is
# considered satisfied (affects merge-order priority, not selection).
1326 if not atom.blocker and vardb.match(atom):
1327 mypriority.satisfied = True
1329 if not self._add_dep(Dependency(atom=atom,
1330 blocker=atom.blocker, depth=depth, parent=pkg,
1331 priority=mypriority, root=dep_root),
1332 allow_unsatisfied=allow_unsatisfied):
1335 except portage.exception.InvalidAtom, e:
1336 show_invalid_depstring_notice(
1337 pkg, dep_string, str(e))
1339 if not pkg.installed:
1343 print "Exiting...", pkg
1347 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
1349 Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
1350 Yields non-disjunctive deps. Raises InvalidDependString when
# NOTE(review): numbered listing with interior gaps (loop scaffolding,
# 'yield' statements and the '||' comparison are missing); visible lines
# kept byte-identical.
1354 while i < len(dep_struct):
1356 if isinstance(x, list):
# Recurse into nested dependency groups.
1357 for y in self._queue_disjunctive_deps(
1358 pkg, dep_root, dep_priority, x):
1361 self._queue_disjunction(pkg, dep_root, dep_priority,
1362 [ x, dep_struct[ i + 1 ] ] )
1366 x = portage.dep.Atom(x)
1367 except portage.exception.InvalidAtom:
1368 if not pkg.installed:
1369 raise portage.exception.InvalidDependString(
1370 "invalid atom: '%s'" % x)
1372 # Note: Eventually this will check for PROPERTIES=virtual
1373 # or whatever other metadata gets implemented for this
1375 if x.cp.startswith('virtual/'):
# Old-style virtuals are treated as disjunctions and deferred.
1376 self._queue_disjunction( pkg, dep_root,
1377 dep_priority, [ str(x) ] )
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
	"""Defer a disjunctive dependency for later processing by
	_pop_disjunction()."""
	record = (pkg, dep_root, dep_priority, dep_struct)
	self._dep_disjunctive_stack.append(record)
1386 def _pop_disjunction(self, allow_unsatisfied):
1388 Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
1389 populate self._dep_stack.
# NOTE(review): docstring delimiters and the trailing return statements are
# missing from this excerpt; visible lines kept byte-identical.
1391 pkg, dep_root, dep_priority, dep_struct = \
1392 self._dep_disjunctive_stack.pop()
1393 dep_string = portage.dep.paren_enclose(dep_struct)
1394 if not self._add_pkg_dep_string(
1395 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
# Build a dependency priority object: UnmergeDepPriority for removal
# actions, DepPriority otherwise, forwarding keyword flags (buildtime,
# runtime, optional, ...).
# NOTE(review): original line 1402 (presumably "else:") is missing from this
# excerpt; visible lines kept byte-identical.
1399 def _priority(self, **kwargs):
1400 if "remove" in self.myparams:
1401 priority_constructor = UnmergeDepPriority
1403 priority_constructor = DepPriority
1404 return priority_constructor(**kwargs)
1406 def _dep_expand(self, root_config, atom_without_category):
1408 @param root_config: a root config instance
1409 @type root_config: RootConfig
1410 @param atom_without_category: an atom without a category component
1411 @type atom_without_category: String
1413 @returns: a list of atoms containing categories (possibly empty)
# NOTE(review): docstring delimiters, the 'categories' accumulator setup and
# the return statement are missing from this excerpt; visible lines kept
# byte-identical.
1415 null_cp = portage.dep_getkey(insert_category_into_atom(
1416 atom_without_category, "null"))
1417 cat, atom_pn = portage.catsplit(null_cp)
1419 dbs = self._filtered_trees[root_config.root]["dbs"]
# Probe every db's categories for a package named atom_pn.
1421 for db, pkg_type, built, installed, db_keys in dbs:
1422 for cat in db.categories:
1423 if db.cp_list("%s/%s" % (cat, atom_pn)):
1427 for cat in categories:
1428 deps.append(insert_category_into_atom(
1429 atom_without_category, cat))
# Return whether any configured db for 'root' carries a package matching
# atom_cp -- used to detect new-style virtual providers.
# NOTE(review): the return statements are missing from this excerpt; visible
# lines kept byte-identical.
1432 def _have_new_virt(self, root, atom_cp):
1434 for db, pkg_type, built, installed, db_keys in \
1435 self._filtered_trees[root]["dbs"]:
1436 if db.cp_list(atom_cp):
# Yield the (arg, atom) pairs from the command-line/set arguments that match
# pkg, skipping old-style virtual matches that are superseded by a new-style
# virtual or by a visible package in a higher slot.
# NOTE(review): numbered listing with interior gaps (early returns,
# 'continue' statements, the yield and higher_slot initialization are
# missing); visible lines kept byte-identical.
1441 def _iter_atoms_for_pkg(self, pkg):
1442 # TODO: add multiple $ROOT support
1443 if pkg.root != self.target_root:
1445 atom_arg_map = self._atom_arg_map
1446 root_config = self.roots[pkg.root]
1447 for atom in self._set_atoms.iterAtomsForPackage(pkg):
1448 atom_cp = portage.dep_getkey(atom)
1449 if atom_cp != pkg.cp and \
1450 self._have_new_virt(pkg.root, atom_cp):
1452 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
1453 visible_pkgs.reverse() # descending order
1455 for visible_pkg in visible_pkgs:
1456 if visible_pkg.cp != atom_cp:
1458 if pkg >= visible_pkg:
1459 # This is descending order, and we're not
1460 # interested in any versions <= pkg given.
1462 if pkg.slot_atom != visible_pkg.slot_atom:
1463 higher_slot = visible_pkg
1465 if higher_slot is not None:
1467 for arg in atom_arg_map[(atom, pkg.root)]:
1468 if isinstance(arg, PackageArg) and \
# Entry point for resolving the user's command-line arguments: classifies
# each argument (.tbz2 binary package, .ebuild file, file path owned by an
# installed package, set name, or package atom), builds the corresponding
# *Arg objects, applies greedy-slot expansion for --update, adds the root
# packages to the graph and finally runs _create_graph().
# NOTE(review): numbered listing with many interior gaps (loop headers,
# try/except scaffolding, 'continue'/'return' lines missing); visible lines
# kept byte-identical.
1473 def select_files(self, myfiles):
1474 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
1475 appropriate depgraph and return a favorite list."""
1476 debug = "--debug" in self.myopts
1477 root_config = self.roots[self.target_root]
1478 sets = root_config.sets
1479 getSetAtoms = root_config.setconfig.getSetAtoms
1481 myroot = self.target_root
1482 dbs = self._filtered_trees[myroot]["dbs"]
1483 vardb = self.trees[myroot]["vartree"].dbapi
1484 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
1485 portdb = self.trees[myroot]["porttree"].dbapi
1486 bindb = self.trees[myroot]["bintree"].dbapi
1487 pkgsettings = self.pkgsettings[myroot]
1489 onlydeps = "--onlydeps" in self.myopts
# --- Binary package (.tbz2) arguments ---
1492 ext = os.path.splitext(x)[1]
1494 if not os.path.exists(x):
1496 os.path.join(pkgsettings["PKGDIR"], "All", x)):
1497 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1498 elif os.path.exists(
1499 os.path.join(pkgsettings["PKGDIR"], x)):
1500 x = os.path.join(pkgsettings["PKGDIR"], x)
1502 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
1503 print "!!! Please ensure the tbz2 exists as specified.\n"
1504 return 0, myfavorites
1505 mytbz2=portage.xpak.tbz2(x)
1506 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1507 if os.path.realpath(x) != \
1508 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
1509 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
1510 return 0, myfavorites
1511 db_keys = list(bindb._aux_cache_keys)
1512 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
1513 pkg = Package(type_name="binary", root_config=root_config,
1514 cpv=mykey, built=True, metadata=metadata,
1516 self._pkg_cache[pkg] = pkg
1517 args.append(PackageArg(arg=x, package=pkg,
1518 root_config=root_config))
# --- Ebuild file arguments ---
1519 elif ext==".ebuild":
1520 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1521 pkgdir = os.path.dirname(ebuild_path)
1522 tree_root = os.path.dirname(os.path.dirname(pkgdir))
1523 cp = pkgdir[len(tree_root)+1:]
1524 e = portage.exception.PackageNotFound(
1525 ("%s is not in a valid portage tree " + \
1526 "hierarchy or does not exist") % x)
1527 if not portage.isvalidatom(cp):
1529 cat = portage.catsplit(cp)[0]
1530 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1531 if not portage.isvalidatom("="+mykey):
1533 ebuild_path = portdb.findname(mykey)
1535 if ebuild_path != os.path.join(os.path.realpath(tree_root),
1536 cp, os.path.basename(ebuild_path)):
1537 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
1538 return 0, myfavorites
1539 if mykey not in portdb.xmatch(
1540 "match-visible", portage.dep_getkey(mykey)):
1541 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
1542 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
1543 print colorize("BAD", "*** page for details.")
1544 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
1547 raise portage.exception.PackageNotFound(
1548 "%s is not in a valid portage tree hierarchy or does not exist" % x)
1549 db_keys = list(portdb._aux_cache_keys)
1550 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
1551 pkg = Package(type_name="ebuild", root_config=root_config,
1552 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
1553 pkgsettings.setcpv(pkg)
1554 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
1555 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
1556 self._pkg_cache[pkg] = pkg
1557 args.append(PackageArg(arg=x, package=pkg,
1558 root_config=root_config))
# --- Absolute file path arguments (owner lookup) ---
1559 elif x.startswith(os.path.sep):
1560 if not x.startswith(myroot):
1561 portage.writemsg(("\n\n!!! '%s' does not start with" + \
1562 " $ROOT.\n") % x, noiselevel=-1)
1564 # Queue these up since it's most efficient to handle
1565 # multiple files in a single iter_owners() call.
1566 lookup_owners.append(x)
# --- Set arguments (system/world and @set names) ---
1568 if x in ("system", "world"):
1570 if x.startswith(SETPREFIX):
1571 s = x[len(SETPREFIX):]
1573 raise portage.exception.PackageSetNotFound(s)
1576 # Recursively expand sets so that containment tests in
1577 # self._get_parent_sets() properly match atoms in nested
1578 # sets (like if world contains system).
1579 expanded_set = InternalPackageSet(
1580 initial_atoms=getSetAtoms(s))
1581 self._sets[s] = expanded_set
1582 args.append(SetArg(arg=x, set=expanded_set,
1583 root_config=root_config))
# --- Plain atom arguments ---
1585 if not is_valid_package_atom(x):
1586 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1588 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1589 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1591 # Don't expand categories or old-style virtuals here unless
1592 # necessary. Expansion of old-style virtuals here causes at
1593 # least the following problems:
1594 # 1) It's more difficult to determine which set(s) an atom
1595 # came from, if any.
1596 # 2) It takes away freedom from the resolver to choose other
1597 # possible expansions when necessary.
1599 args.append(AtomArg(arg=x, atom=x,
1600 root_config=root_config))
1602 expanded_atoms = self._dep_expand(root_config, x)
1603 installed_cp_set = set()
1604 for atom in expanded_atoms:
1605 atom_cp = portage.dep_getkey(atom)
1606 if vardb.cp_list(atom_cp):
1607 installed_cp_set.add(atom_cp)
1609 if len(installed_cp_set) > 1:
1610 non_virtual_cps = set()
1611 for atom_cp in installed_cp_set:
1612 if not atom_cp.startswith("virtual/"):
1613 non_virtual_cps.add(atom_cp)
1614 if len(non_virtual_cps) == 1:
1615 installed_cp_set = non_virtual_cps
1617 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
# Prefer the expansion matching the already-installed package.
1618 installed_cp = iter(installed_cp_set).next()
1619 expanded_atoms = [atom for atom in expanded_atoms \
1620 if portage.dep_getkey(atom) == installed_cp]
1622 if len(expanded_atoms) > 1:
1625 ambiguous_package_name(x, expanded_atoms, root_config,
1626 self.spinner, self.myopts)
1627 return False, myfavorites
1629 atom = expanded_atoms[0]
1631 null_atom = insert_category_into_atom(x, "null")
1632 null_cp = portage.dep_getkey(null_atom)
1633 cat, atom_pn = portage.catsplit(null_cp)
1634 virts_p = root_config.settings.get_virts_p().get(atom_pn)
1636 # Allow the depgraph to choose which virtual.
1637 atom = insert_category_into_atom(x, "virtual")
1639 atom = insert_category_into_atom(x, "null")
1641 args.append(AtomArg(arg=x, atom=atom,
1642 root_config=root_config))
# --- Resolve queued file paths to owning packages ---
1646 search_for_multiple = False
1647 if len(lookup_owners) > 1:
1648 search_for_multiple = True
1650 for x in lookup_owners:
1651 if not search_for_multiple and os.path.isdir(x):
1652 search_for_multiple = True
1653 relative_paths.append(x[len(myroot):])
1656 for pkg, relative_path in \
1657 real_vardb._owners.iter_owners(relative_paths):
1658 owners.add(pkg.mycpv)
1659 if not search_for_multiple:
1663 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1664 "by any package.\n") % lookup_owners[0], noiselevel=-1)
1668 slot = vardb.aux_get(cpv, ["SLOT"])[0]
1670 # portage now masks packages with missing slot, but it's
1671 # possible that one was installed by an older version
1672 atom = portage.cpv_getkey(cpv)
1674 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
1675 args.append(AtomArg(arg=atom, atom=atom,
1676 root_config=root_config))
1678 if "--update" in self.myopts:
1679 # In some cases, the greedy slots behavior can pull in a slot that
1680 # the user would want to uninstall due to it being blocked by a
1681 # newer version in a different slot. Therefore, it's necessary to
1682 # detect and discard any that should be uninstalled. Each time
1683 # that arguments are updated, package selections are repeated in
1684 # order to ensure consistency with the current arguments:
1686 # 1) Initialize args
1687 # 2) Select packages and generate initial greedy atoms
1688 # 3) Update args with greedy atoms
1689 # 4) Select packages and generate greedy atoms again, while
1690 # accounting for any blockers between selected packages
1691 # 5) Update args with revised greedy atoms
1693 self._set_args(args)
1696 greedy_args.append(arg)
1697 if not isinstance(arg, AtomArg):
1699 for atom in self._greedy_slots(arg.root_config, arg.atom):
1701 AtomArg(arg=arg.arg, atom=atom,
1702 root_config=arg.root_config))
1704 self._set_args(greedy_args)
1707 # Revise greedy atoms, accounting for any blockers
1708 # between selected packages.
1709 revised_greedy_args = []
1711 revised_greedy_args.append(arg)
1712 if not isinstance(arg, AtomArg):
1714 for atom in self._greedy_slots(arg.root_config, arg.atom,
1715 blocker_lookahead=True):
1716 revised_greedy_args.append(
1717 AtomArg(arg=arg.arg, atom=atom,
1718 root_config=arg.root_config))
1719 args = revised_greedy_args
1720 del revised_greedy_args
1722 self._set_args(args)
# Favorites (recorded in the world file) are atoms and set names.
1724 myfavorites = set(myfavorites)
1726 if isinstance(arg, (AtomArg, PackageArg)):
1727 myfavorites.add(arg.atom)
1728 elif isinstance(arg, SetArg):
1729 myfavorites.add(arg.arg)
1730 myfavorites = list(myfavorites)
1732 pprovideddict = pkgsettings.pprovideddict
1734 portage.writemsg("\n", noiselevel=-1)
1735 # Order needs to be preserved since a feature of --nodeps
1736 # is to allow the user to force a specific merge order.
# --- Seed the graph with the root packages for each argument ---
1740 for atom in arg.set:
1741 self.spinner.update()
1742 dep = Dependency(atom=atom, onlydeps=onlydeps,
1743 root=myroot, parent=arg)
1744 atom_cp = portage.dep_getkey(atom)
1746 pprovided = pprovideddict.get(portage.dep_getkey(atom))
1747 if pprovided and portage.match_from_list(atom, pprovided):
1748 # A provided package has been specified on the command line.
1749 self._pprovided_args.append((arg, atom))
1751 if isinstance(arg, PackageArg):
1752 if not self._add_pkg(arg.package, dep) or \
1753 not self._create_graph():
1754 sys.stderr.write(("\n\n!!! Problem resolving " + \
1755 "dependencies for %s\n") % arg.arg)
1756 return 0, myfavorites
1759 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
1760 (arg, atom), noiselevel=-1)
1761 pkg, existing_node = self._select_package(
1762 myroot, atom, onlydeps=onlydeps)
1764 if not (isinstance(arg, SetArg) and \
1765 arg.name in ("system", "world")):
1766 self._unsatisfied_deps_for_display.append(
1767 ((myroot, atom), {}))
1768 return 0, myfavorites
1769 self._missing_args.append((arg, atom))
1771 if atom_cp != pkg.cp:
1772 # For old-style virtuals, we need to repeat the
1773 # package.provided check against the selected package.
1774 expanded_atom = atom.replace(atom_cp, pkg.cp)
1775 pprovided = pprovideddict.get(pkg.cp)
1777 portage.match_from_list(expanded_atom, pprovided):
1778 # A provided package has been
1779 # specified on the command line.
1780 self._pprovided_args.append((arg, atom))
1782 if pkg.installed and "selective" not in self.myparams:
1783 self._unsatisfied_deps_for_display.append(
1784 ((myroot, atom), {}))
1785 # Previous behavior was to bail out in this case, but
1786 # since the dep is satisfied by the installed package,
1787 # it's more friendly to continue building the graph
1788 # and just show a warning message. Therefore, only bail
1789 # out here if the atom is not from either the system or
1791 if not (isinstance(arg, SetArg) and \
1792 arg.name in ("system", "world")):
1793 return 0, myfavorites
1795 # Add the selected package to the graph as soon as possible
1796 # so that later dep_check() calls can use it as feedback
1797 # for making more consistent atom selections.
1798 if not self._add_pkg(pkg, dep):
1799 if isinstance(arg, SetArg):
1800 sys.stderr.write(("\n\n!!! Problem resolving " + \
1801 "dependencies for %s from %s\n") % \
1804 sys.stderr.write(("\n\n!!! Problem resolving " + \
1805 "dependencies for %s\n") % atom)
1806 return 0, myfavorites
1808 except portage.exception.MissingSignature, e:
1809 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1810 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1811 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1812 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1813 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1814 return 0, myfavorites
1815 except portage.exception.InvalidSignature, e:
1816 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1817 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1818 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1819 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1820 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1821 return 0, myfavorites
1822 except SystemExit, e:
1823 raise # Needed else can't exit
1824 except Exception, e:
1825 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
1826 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
1829 # Now that the root packages have been added to the graph,
1830 # process the dependencies.
1831 if not self._create_graph():
1832 return 0, myfavorites
1835 if "--usepkgonly" in self.myopts:
1836 for xs in self.digraph.all_nodes():
1837 if not isinstance(xs, Package):
1839 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1843 print "Missing binary for:",xs[2]
1847 except self._unknown_internal_error:
1848 return False, myfavorites
1850 # We're true here unless we are missing binaries.
1851 return (not missing,myfavorites)
1853 def _set_args(self, args):
1855 Create the "args" package set from atoms and packages given as
1856 arguments. This method can be called multiple times if necessary.
1857 The package selection cache is automatically invalidated, since
1858 arguments influence package selections.
# NOTE(review): docstring delimiters and several loop/add lines are missing
# from this excerpt; visible lines kept byte-identical.
1860 args_set = self._sets["args"]
1863 if not isinstance(arg, (AtomArg, PackageArg)):
1866 if atom in args_set:
1870 self._set_atoms.clear()
1871 self._set_atoms.update(chain(*self._sets.itervalues()))
# Rebuild the atom -> [args] reverse mapping from scratch.
1872 atom_arg_map = self._atom_arg_map
1873 atom_arg_map.clear()
1875 for atom in arg.set:
1876 atom_key = (atom, arg.root_config.root)
1877 refs = atom_arg_map.get(atom_key)
1880 atom_arg_map[atom_key] = refs
1884 # Invalidate the package selection cache, since
1885 # arguments influence package selections.
1886 self._highest_pkg_cache.clear()
1887 for trees in self._filtered_trees.itervalues():
1888 trees["porttree"].dbapi._clear_cache()
1890 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
1892 Return a list of slot atoms corresponding to installed slots that
1893 differ from the slot of the highest visible match. When
1894 blocker_lookahead is True, slot atoms that would trigger a blocker
1895 conflict are automatically discarded, potentially allowing automatic
1896 uninstallation of older slots when appropriate.
# NOTE(review): docstring delimiters, early returns and some accumulator
# setup lines are missing from this excerpt; visible lines kept
# byte-identical.
1898 highest_pkg, in_graph = self._select_package(root_config.root, atom)
1899 if highest_pkg is None:
1901 vardb = root_config.trees["vartree"].dbapi
1903 for cpv in vardb.match(atom):
1904 # don't mix new virtuals with old virtuals
1905 if portage.cpv_getkey(cpv) == highest_pkg.cp:
1906 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
1908 slots.add(highest_pkg.metadata["SLOT"])
1912 slots.remove(highest_pkg.metadata["SLOT"])
1915 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1916 pkg, in_graph = self._select_package(root_config.root, slot_atom)
1917 if pkg is not None and \
1918 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
1919 greedy_pkgs.append(pkg)
1922 if not blocker_lookahead:
1923 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: compute each candidate's blocker atoms and filter
# out candidates that conflict with highest_pkg or with each other.
1926 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1927 for pkg in greedy_pkgs + [highest_pkg]:
1928 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1930 atoms = self._select_atoms(
1931 pkg.root, dep_str, pkg.use.enabled,
1932 parent=pkg, strict=True)
1933 except portage.exception.InvalidDependString:
1935 blocker_atoms = (x for x in atoms if x.blocker)
1936 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
1938 if highest_pkg not in blockers:
1941 # filter packages with invalid deps
1942 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
1944 # filter packages that conflict with highest_pkg
1945 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
1946 (blockers[highest_pkg].findAtomForPackage(pkg) or \
1947 blockers[pkg].findAtomForPackage(highest_pkg))]
1952 # If two packages conflict, discard the lower version.
1953 discard_pkgs = set()
1954 greedy_pkgs.sort(reverse=True)
1955 for i in xrange(len(greedy_pkgs) - 1):
1956 pkg1 = greedy_pkgs[i]
1957 if pkg1 in discard_pkgs:
1959 for j in xrange(i + 1, len(greedy_pkgs)):
1960 pkg2 = greedy_pkgs[j]
1961 if pkg2 in discard_pkgs:
1963 if blockers[pkg1].findAtomForPackage(pkg2) or \
1964 blockers[pkg2].findAtomForPackage(pkg1):
1966 discard_pkgs.add(pkg2)
1968 return [pkg.slot_atom for pkg in greedy_pkgs \
1969 if pkg not in discard_pkgs]
1334-style wrapper: delegate to _select_atoms_highest_available with the
1971 def _select_atoms_from_graph(self, *pargs, **kwargs):
1973 Prefer atoms matching packages that have already been
1974 added to the graph or those that are installed and have
1975 not been scheduled for replacement.
# NOTE(review): docstring delimiters are missing from this excerpt; visible
# lines kept byte-identical. Delegates with trees=self._graph_trees so the
# dep_check lookups consult the in-graph package view.
1977 kwargs["trees"] = self._graph_trees
1978 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a dependency string and return the selected
# atoms, temporarily disabling module-level strict checking and passing the
# parent package only for buildtime deps (circular-dep avoidance).
# NOTE(review): numbered listing with interior gaps (try/finally scaffolding
# and the 'parent = None' branch are missing); visible lines kept
# byte-identical.
1980 def _select_atoms_highest_available(self, root, depstring,
1981 myuse=None, parent=None, strict=True, trees=None, priority=None):
1982 """This will raise InvalidDependString if necessary. If trees is
1983 None then self._filtered_trees is used."""
1984 pkgsettings = self.pkgsettings[root]
1986 trees = self._filtered_trees
1987 if not getattr(priority, "buildtime", False):
1988 # The parent should only be passed to dep_check() for buildtime
1989 # dependencies since that's the only case when it's appropriate
1990 # to trigger the circular dependency avoidance code which uses it.
1991 # It's important not to trigger the same circular dependency
1992 # avoidance code for runtime dependencies since it's not needed
1993 # and it can promote an incorrect package choice.
1997 if parent is not None:
# dep_check() reads the parent out of the trees mapping, not a kwarg.
1998 trees[root]["parent"] = parent
2000 portage.dep._dep_check_strict = False
2001 mycheck = portage.dep_check(depstring, None,
2002 pkgsettings, myuse=myuse,
2003 myroot=root, trees=trees)
2005 if parent is not None:
2006 trees[root].pop("parent")
2007 portage.dep._dep_check_strict = True
2009 raise portage.exception.InvalidDependString(mycheck[1])
2010 selected_atoms = mycheck[1]
2011 return selected_atoms
# Print a diagnostic explaining why no package satisfies `atom` on
# `root`: USE-flag mismatches, missing IUSE, masking, or simply no
# ebuilds, followed by the parent chain that pulled the dep in.
# NOTE(review): this listing is decimated (embedded line numbers skip,
# e.g. 2026->2027->2029, 2059->2062); several assignments such as the
# apparent "missing_use = []" initializer are absent -- confirm against
# the upstream file before editing.
2013 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
2014 atom = portage.dep.Atom(atom)
2015 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a slot-qualified variant of the atom with USE deps stripped, so
# matching below ignores USE requirements.
2016 atom_without_use = atom
2018 atom_without_use = portage.dep.remove_slot(atom)
2020 atom_without_use += ":" + atom.slot
2021 atom_without_use = portage.dep.Atom(atom_without_use)
2022 xinfo = '"%s"' % atom
2025 # Discard null/ from failed cpv_expand category expansion.
2026 xinfo = xinfo.replace("null/", "")
2027 masked_packages = []
2029 masked_pkg_instances = set()
2030 missing_licenses = []
2031 have_eapi_mask = False
2032 pkgsettings = self.pkgsettings[root]
2033 implicit_iuse = pkgsettings._get_implicit_iuse()
2034 root_config = self.roots[root]
2035 portdb = self.roots[root].trees["porttree"].dbapi
2036 dbs = self._filtered_trees[root]["dbs"]
# Scan every candidate db (ebuild/binary/installed) for matches and
# collect the mask reasons for each.
2037 for db, pkg_type, built, installed, db_keys in dbs:
# xmatch("match-all", ...) includes masked packages; plain match() is
# the fallback for dbs without xmatch (non-porttree).
2041 if hasattr(db, "xmatch"):
2042 cpv_list = db.xmatch("match-all", atom_without_use)
2044 cpv_list = db.match(atom_without_use)
2047 for cpv in cpv_list:
2048 metadata, mreasons = get_mask_info(root_config, cpv,
2049 pkgsettings, db, pkg_type, built, installed, db_keys)
2050 if metadata is not None:
2051 pkg = Package(built=built, cpv=cpv,
2052 installed=installed, metadata=metadata,
2053 root_config=root_config)
2054 if pkg.cp != atom.cp:
2055 # A cpv can be returned from dbapi.match() as an
2056 # old-style virtual match even in cases when the
2057 # package does not actually PROVIDE the virtual.
2058 # Filter out any such false matches here.
2059 if not atom_set.findAtomForPackage(pkg):
2062 masked_pkg_instances.add(pkg)
# NOTE(review): missing_use is appended to here but its initializer is
# not visible in this listing.
2064 missing_use.append(pkg)
2067 masked_packages.append(
2068 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify the USE-related candidates: flags absent from IUSE vs.
# flags that merely need toggling.
2070 missing_use_reasons = []
2071 missing_iuse_reasons = []
2072 for pkg in missing_use:
2073 use = pkg.use.enabled
# Regex over IUSE so implicit (profile-injected) flags are honored.
2074 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2075 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2077 for x in atom.use.required:
2078 if iuse_re.match(x) is None:
2079 missing_iuse.append(x)
2082 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2083 missing_iuse_reasons.append((pkg, mreasons))
2085 need_enable = sorted(atom.use.enabled.difference(use))
2086 need_disable = sorted(atom.use.disabled.intersection(use))
2087 if need_enable or need_disable:
2089 changes.extend(colorize("red", "+" + x) \
2090 for x in need_enable)
2091 changes.extend(colorize("blue", "-" + x) \
2092 for x in need_disable)
2093 mreasons.append("Change USE: %s" % " ".join(changes))
2094 missing_use_reasons.append((pkg, mreasons))
# Only unmasked candidates are worth suggesting USE changes for.
2096 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2097 in missing_use_reasons if pkg not in masked_pkg_instances]
2099 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2100 in missing_iuse_reasons if pkg not in masked_pkg_instances]
2102 show_missing_use = False
2103 if unmasked_use_reasons:
2104 # Only show the latest version.
2105 show_missing_use = unmasked_use_reasons[:1]
2106 elif unmasked_iuse_reasons:
2107 if missing_use_reasons:
2108 # All packages with required IUSE are masked,
2109 # so display a normal masking message.
2112 show_missing_use = unmasked_iuse_reasons
# Emit one of three reports: USE-flag advice, mask listing, or a plain
# "no ebuilds" message (Python 2 print statements).
2114 if show_missing_use:
2115 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
2116 print "!!! One of the following packages is required to complete your request:"
2117 for pkg, mreasons in show_missing_use:
2118 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
2120 elif masked_packages:
2122 colorize("BAD", "All ebuilds that could satisfy ") + \
2123 colorize("INFORM", xinfo) + \
2124 colorize("BAD", " have been masked.")
2125 print "!!! One of the following masked packages is required to complete your request:"
2126 have_eapi_mask = show_masked_packages(masked_packages)
# Extra hint when a candidate is masked only because its EAPI is newer
# than this portage supports.
2129 msg = ("The current version of portage supports " + \
2130 "EAPI '%s'. You must upgrade to a newer version" + \
2131 " of portage before EAPI masked packages can" + \
2132 " be installed.") % portage.const.EAPI
2133 from textwrap import wrap
2134 for line in wrap(msg, 75):
2139 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
2141 # Show parent nodes and the argument that pulled them in.
2142 traversed_nodes = set()
2145 while node is not None:
2146 traversed_nodes.add(node)
2147 msg.append('(dependency required by "%s" [%s])' % \
2148 (colorize('INFORM', str(node.cpv)), node.type_name))
2149 # When traversing to parents, prefer arguments over packages
2150 # since arguments are root nodes. Never traverse the same
2151 # package twice, in order to prevent an infinite loop.
2152 selected_parent = None
2153 for parent in self.digraph.parent_nodes(node):
2154 if isinstance(parent, DependencyArg):
2155 msg.append('(dependency required by "%s" [argument])' % \
2156 (colorize('INFORM', str(parent))))
# An argument node terminates the walk (arguments are graph roots).
2157 selected_parent = None
2159 if parent not in traversed_nodes:
2160 selected_parent = parent
2161 node = selected_parent
# Memoizing wrapper around _select_pkg_highest_available_imp(): caches
# the (pkg, existing_node) result per (root, atom, onlydeps) and feeds
# visible choices into root_config.visible_pkgs.
# NOTE(review): decimated listing -- the cache-hit early return, the
# (pkg, existing) unpacking of `ret`, and the surrounding conditionals
# are among the missing lines; verify against upstream.
2167 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2168 cache_key = (root, atom, onlydeps)
2169 ret = self._highest_pkg_cache.get(cache_key)
2172 if pkg and not existing:
2173 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
2174 if existing and existing == pkg:
2175 # Update the cache to reflect that the
2176 # package has been added to the graph.
2178 self._highest_pkg_cache[cache_key] = ret
# Cache miss: delegate to the real selection logic and memoize.
2180 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2181 self._highest_pkg_cache[cache_key] = ret
# Record visible, non-keyword-masked choices so later passes can treat
# them as available.
2184 settings = pkg.root_config.settings
2185 if visible(settings, pkg) and not (pkg.installed and \
2186 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
2187 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: walk all candidate dbs (graph/ebuild/
# binary/installed) in type-preference order and pick the best match for
# `atom`, returning (pkg, existing_node).
# NOTE(review): this listing is decimated (embedded numbering skips
# throughout, e.g. 2200->2202, 2222->2225); loop guards, `continue`/
# `break` statements, and some assignments (e.g. the apparent
# "atom_cp = atom.cp") are missing -- verify against upstream before
# relying on control flow shown here.
2190 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2191 root_config = self.roots[root]
2192 pkgsettings = self.pkgsettings[root]
2193 dbs = self._filtered_trees[root]["dbs"]
2194 vardb = self.roots[root].trees["vartree"].dbapi
2195 portdb = self.roots[root].trees["porttree"].dbapi
2196 # List of acceptable packages, ordered by type preference.
2197 matched_packages = []
2198 highest_version = None
2199 if not isinstance(atom, portage.dep.Atom):
2200 atom = portage.dep.Atom(atom)
2202 atom_set = InternalPackageSet(initial_atoms=(atom,))
2203 existing_node = None
2205 usepkgonly = "--usepkgonly" in self.myopts
2206 empty = "empty" in self.myparams
2207 selective = "selective" in self.myparams
2209 noreplace = "--noreplace" in self.myopts
2210 # Behavior of the "selective" parameter depends on
2211 # whether or not a package matches an argument atom.
2212 # If an installed package provides an old-style
2213 # virtual that is no longer provided by an available
2214 # package, the installed package may match an argument
2215 # atom even though none of the available packages do.
2216 # Therefore, "selective" logic does not consider
2217 # whether or not an installed package matches an
2218 # argument atom. It only considers whether or not
2219 # available packages match argument atoms, which is
2220 # represented by the found_available_arg flag.
2221 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back
# to fresh candidates from the dbs.
2222 for find_existing_node in True, False:
2225 for db, pkg_type, built, installed, db_keys in dbs:
2228 if installed and not find_existing_node:
2229 want_reinstall = reinstall or empty or \
2230 (found_available_arg and not selective)
2231 if want_reinstall and matched_packages:
2233 if hasattr(db, "xmatch"):
2234 cpv_list = db.xmatch("match-all", atom)
2236 cpv_list = db.match(atom)
2238 # USE=multislot can make an installed package appear as if
2239 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2240 # won't do any good as long as USE=multislot is enabled since
2241 # the newly built package still won't have the expected slot.
2242 # Therefore, assume that such SLOT dependencies are already
2243 # satisfied rather than forcing a rebuild.
2244 if installed and not cpv_list and atom.slot:
2245 for cpv in db.match(atom.cp):
2246 slot_available = False
2247 for other_db, other_type, other_built, \
2248 other_installed, other_keys in dbs:
2251 other_db.aux_get(cpv, ["SLOT"])[0]:
2252 slot_available = True
2256 if not slot_available:
2258 inst_pkg = self._pkg(cpv, "installed",
2259 root_config, installed=installed)
2260 # Remove the slot from the atom and verify that
2261 # the package matches the resulting atom.
2262 atom_without_slot = portage.dep.remove_slot(atom)
2264 atom_without_slot += str(atom.use)
2265 atom_without_slot = portage.dep.Atom(atom_without_slot)
2266 if portage.match_from_list(
2267 atom_without_slot, [inst_pkg]):
2268 cpv_list = [inst_pkg.cpv]
2273 pkg_status = "merge"
2274 if installed or onlydeps:
2275 pkg_status = "nomerge"
2278 for cpv in cpv_list:
2279 # Make --noreplace take precedence over --newuse.
2280 if not installed and noreplace and \
2281 cpv in vardb.match(atom):
2282 # If the installed version is masked, it may
2283 # be necessary to look at lower versions,
2284 # in case there is a visible downgrade.
2286 reinstall_for_flags = None
2287 cache_key = (pkg_type, root, cpv, pkg_status)
2288 calculated_use = True
2289 pkg = self._pkg_cache.get(cache_key)
2291 calculated_use = False
# Lazily assemble metadata for a fresh Package (izip is consumed once).
2293 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
2296 pkg = Package(built=built, cpv=cpv,
2297 installed=installed, metadata=metadata,
2298 onlydeps=onlydeps, root_config=root_config,
2300 metadata = pkg.metadata
2302 metadata['CHOST'] = pkgsettings.get('CHOST', '')
2303 if not built and ("?" in metadata["LICENSE"] or \
2304 "?" in metadata["PROVIDE"]):
2305 # This is avoided whenever possible because
2306 # it's expensive. It only needs to be done here
2307 # if it has an effect on visibility.
2308 pkgsettings.setcpv(pkg)
2309 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2310 calculated_use = True
2311 self._pkg_cache[pkg] = pkg
2313 if not installed or (built and matched_packages):
2314 # Only enforce visibility on installed packages
2315 # if there is at least one other visible package
2316 # available. By filtering installed masked packages
2317 # here, packages that have been masked since they
2318 # were installed can be automatically downgraded
2319 # to an unmasked version.
2321 if not visible(pkgsettings, pkg):
2323 except portage.exception.InvalidDependString:
2327 # Enable upgrade or downgrade to a version
2328 # with visible KEYWORDS when the installed
2329 # version is masked by KEYWORDS, but never
2330 # reinstall the same exact version only due
2331 # to a KEYWORDS mask.
2332 if built and matched_packages:
2334 different_version = None
2335 for avail_pkg in matched_packages:
2336 if not portage.dep.cpvequal(
2337 pkg.cpv, avail_pkg.cpv):
2338 different_version = avail_pkg
2340 if different_version is not None:
2343 pkgsettings._getMissingKeywords(
2344 pkg.cpv, pkg.metadata):
2347 # If the ebuild no longer exists or it's
2348 # keywords have been dropped, reject built
2349 # instances (installed or binary).
2350 # If --usepkgonly is enabled, assume that
2351 # the ebuild status should be ignored.
2355 pkg.cpv, "ebuild", root_config)
2356 except portage.exception.PackageNotFound:
2359 if not visible(pkgsettings, pkg_eb):
# Make sure ebuild USE is computed before any USE-dep matching below.
2362 if not pkg.built and not calculated_use:
2363 # This is avoided whenever possible because
2365 pkgsettings.setcpv(pkg)
2366 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
2368 if pkg.cp != atom.cp:
2369 # A cpv can be returned from dbapi.match() as an
2370 # old-style virtual match even in cases when the
2371 # package does not actually PROVIDE the virtual.
2372 # Filter out any such false matches here.
2373 if not atom_set.findAtomForPackage(pkg):
# Track whether an *available* (non-installed) package matches a
# command-line argument; feeds the "selective" logic above.
2377 if root == self.target_root:
2379 # Ebuild USE must have been calculated prior
2380 # to this point, in case atoms have USE deps.
2381 myarg = self._iter_atoms_for_pkg(pkg).next()
2382 except StopIteration:
2384 except portage.exception.InvalidDependString:
2386 # masked by corruption
2388 if not installed and myarg:
2389 found_available_arg = True
# Reject candidates whose USE state conflicts with the atom's USE deps.
2391 if atom.use and not pkg.built:
2392 use = pkg.use.enabled
2393 if atom.use.enabled.difference(use):
2395 if atom.use.disabled.intersection(use):
# NOTE(review): atom_cp is used here but its assignment is not visible
# in this listing.
2397 if pkg.cp == atom_cp:
2398 if highest_version is None:
2399 highest_version = pkg
2400 elif pkg > highest_version:
2401 highest_version = pkg
2402 # At this point, we've found the highest visible
2403 # match from the current repo. Any lower versions
2404 # from this repo are ignored, so this so the loop
2405 # will always end with a break statement below
2407 if find_existing_node:
2408 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
2411 if portage.dep.match_from_list(atom, [e_pkg]):
2412 if highest_version and \
2413 e_pkg.cp == atom_cp and \
2414 e_pkg < highest_version and \
2415 e_pkg.slot_atom != highest_version.slot_atom:
2416 # There is a higher version available in a
2417 # different slot, so this existing node is
2421 matched_packages.append(e_pkg)
2422 existing_node = e_pkg
2424 # Compare built package to current config and
2425 # reject the built package if necessary.
2426 if built and not installed and \
2427 ("--newuse" in self.myopts or \
2428 "--reinstall" in self.myopts):
2429 iuses = pkg.iuse.all
2430 old_use = pkg.use.enabled
2432 pkgsettings.setcpv(myeb)
2434 pkgsettings.setcpv(pkg)
2435 now_use = pkgsettings["PORTAGE_USE"].split()
2436 forced_flags = set()
2437 forced_flags.update(pkgsettings.useforce)
2438 forced_flags.update(pkgsettings.usemask)
2440 if myeb and not usepkgonly:
2441 cur_iuse = myeb.iuse.all
2442 if self._reinstall_for_flags(forced_flags,
2446 # Compare current config to installed package
2447 # and do not reinstall if possible.
2448 if not installed and \
2449 ("--newuse" in self.myopts or \
2450 "--reinstall" in self.myopts) and \
2451 cpv in vardb.match(atom):
2452 pkgsettings.setcpv(pkg)
2453 forced_flags = set()
2454 forced_flags.update(pkgsettings.useforce)
2455 forced_flags.update(pkgsettings.usemask)
2456 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
2457 old_iuse = set(filter_iuse_defaults(
2458 vardb.aux_get(cpv, ["IUSE"])[0].split()))
2459 cur_use = pkg.use.enabled
2460 cur_iuse = pkg.iuse.all
2461 reinstall_for_flags = \
2462 self._reinstall_for_flags(
2463 forced_flags, old_use, old_iuse,
2465 if reinstall_for_flags:
2469 matched_packages.append(pkg)
2470 if reinstall_for_flags:
2471 self._reinstall_nodes[pkg] = \
2475 if not matched_packages:
2478 if "--debug" in self.myopts:
2479 for pkg in matched_packages:
2480 portage.writemsg("%s %s\n" % \
2481 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
2483 # Filter out any old-style virtual matches if they are
2484 # mixed with new-style virtual matches.
2485 cp = portage.dep_getkey(atom)
2486 if len(matched_packages) > 1 and \
2487 "virtual" == portage.catsplit(cp)[0]:
2488 for pkg in matched_packages:
2491 # Got a new-style virtual, so filter
2492 # out any old-style virtuals.
2493 matched_packages = [pkg for pkg in matched_packages \
# Multiple survivors: keep only the overall best version.
2497 if len(matched_packages) > 1:
2498 bestmatch = portage.best(
2499 [pkg.cpv for pkg in matched_packages])
2500 matched_packages = [pkg for pkg in matched_packages \
2501 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
2503 # ordered by type preference ("ebuild" type is the last resort)
2504 return matched_packages[-1], existing_node
# Restricted selection used by _complete_graph(): only consider packages
# already present in the graph trees.
# NOTE(review): decimated listing -- docstring delimiters and the
# empty-match fallback are among the missing lines.
2506 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
2508 Select packages that have already been added to the graph or
2509 those that are installed and have not been scheduled for
2512 graph_db = self._graph_trees[root]["porttree"].dbapi
2513 matches = graph_db.match_pkgs(atom)
2516 pkg = matches[-1] # highest match
# Report whether this slot already has a node in the graph.
2517 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
2518 return pkg, in_graph
# NOTE(review): decimated listing -- early returns, `continue`
# statements, and the docstring delimiters are among the missing lines;
# verify control flow against upstream.
2520 def _complete_graph(self):
2522 Add any deep dependencies of required sets (args, system, world) that
2523 have not been pulled into the graph yet. This ensures that the graph
2524 is consistent such that initially satisfied deep dependencies are not
2525 broken in the new graph. Initially unsatisfied dependencies are
2526 irrelevant since we only want to avoid breaking dependencies that are
2529 Since this method can consume enough time to disturb users, it is
2530 currently only enabled by the --complete-graph option.
2532 if "--buildpkgonly" in self.myopts or \
2533 "recurse" not in self.myparams:
2536 if "complete" not in self.myparams:
2537 # Skip this to avoid consuming enough time to disturb users.
2540 # Put the depgraph into a mode that causes it to only
2541 # select packages that have already been added to the
2542 # graph or those that are installed and have not been
2543 # scheduled for replacement. Also, toggle the "deep"
2544 # parameter so that all dependencies are traversed and
2546 self._select_atoms = self._select_atoms_from_graph
2547 self._select_package = self._select_pkg_from_graph
2548 already_deep = "deep" in self.myparams
2549 if not already_deep:
2550 self.myparams.add("deep")
# Re-queue the required sets (system/world/args) for each root so their
# deep deps get traversed.
2552 for root in self.roots:
2553 required_set_names = self._required_set_names.copy()
2554 if root == self.target_root and \
2555 (already_deep or "empty" in self.myparams):
2556 required_set_names.difference_update(self._sets)
2557 if not required_set_names and not self._ignored_deps:
2559 root_config = self.roots[root]
2560 setconfig = root_config.setconfig
2562 # Reuse existing SetArg instances when available.
2563 for arg in self.digraph.root_nodes():
2564 if not isinstance(arg, SetArg):
2566 if arg.root_config != root_config:
2568 if arg.name in required_set_names:
2570 required_set_names.remove(arg.name)
2571 # Create new SetArg instances only when necessary.
2572 for s in required_set_names:
2573 expanded_set = InternalPackageSet(
2574 initial_atoms=setconfig.getSetAtoms(s))
2575 atom = SETPREFIX + s
2576 args.append(SetArg(arg=atom, set=expanded_set,
2577 root_config=root_config))
2578 vardb = root_config.trees["vartree"].dbapi
2580 for atom in arg.set:
2581 self._dep_stack.append(
2582 Dependency(atom=atom, root=root, parent=arg))
# Re-queue deps that were deliberately ignored on the first pass.
2583 if self._ignored_deps:
2584 self._dep_stack.extend(self._ignored_deps)
2585 self._ignored_deps = []
2586 if not self._create_graph(allow_unsatisfied=True):
2588 # Check the unsatisfied deps to see if any initially satisfied deps
2589 # will become unsatisfied due to an upgrade. Initially unsatisfied
2590 # deps are irrelevant since we only want to avoid breaking deps
2591 # that are initially satisfied.
2592 while self._unsatisfied_deps:
2593 dep = self._unsatisfied_deps.pop()
2594 matches = vardb.match_pkgs(dep.atom)
2596 self._initially_unsatisfied_deps.append(dep)
2598 # An scheduled installation broke a deep dependency.
2599 # Add the installed package to the graph so that it
2600 # will be appropriately reported as a slot collision
2601 # (possibly solvable via backtracking).
2602 pkg = matches[-1] # highest match
2603 if not self._add_pkg(pkg, dep):
2605 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): decimated listing -- docstring delimiters, the cache-hit
# branch, the try/except around aux_get, and the return statement are
# among the missing lines.
2609 def _pkg(self, cpv, type_name, root_config, installed=False):
2611 Get a package instance from the cache, or create a new
2612 one if necessary. Raises KeyError from aux_get if it
2613 failures for some reason (package does not exist or is
2618 operation = "nomerge"
# Cache lookup is keyed on (type, root, cpv, operation).
2619 pkg = self._pkg_cache.get(
2620 (type_name, root_config.root, cpv, operation))
2622 tree_type = self.pkg_tree_map[type_name]
2623 db = root_config.trees[tree_type].dbapi
2624 db_keys = list(self._trees_orig[root_config.root][
2625 tree_type].dbapi._aux_cache_keys)
2627 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
2629 raise portage.exception.PackageNotFound(cpv)
2630 pkg = Package(cpv=cpv, metadata=metadata,
2631 root_config=root_config, installed=installed)
# Ebuilds need their USE computed from current config before use.
2632 if type_name == "ebuild":
2633 settings = self.pkgsettings[root_config.root]
2634 settings.setcpv(pkg)
2635 pkg.metadata["USE"] = settings["PORTAGE_USE"]
2636 pkg.metadata['CHOST'] = settings.get('CHOST', '')
2637 self._pkg_cache[pkg] = pkg
# NOTE(review): decimated listing (embedded numbering skips throughout,
# e.g. 2647->2650, 2774->2776); try/except scaffolding, returns, and
# `continue`/`break` statements are missing -- verify against upstream.
2640 def validate_blockers(self):
2641 """Remove any blockers from the digraph that do not match any of the
2642 packages within the graph. If necessary, create hard deps to ensure
2643 correct merge order such that mutually blocking packages are never
2644 installed simultaneously."""
2646 if "--buildpkgonly" in self.myopts or \
2647 "--nodeps" in self.myopts:
2650 #if "deep" in self.myparams:
2652 # Pull in blockers from all installed packages that haven't already
2653 # been pulled into the depgraph. This is not enabled by default
2654 # due to the performance penalty that is incurred by all the
2655 # additional dep_check calls that are required.
2657 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
2658 for myroot in self.trees:
2659 vardb = self.trees[myroot]["vartree"].dbapi
2660 portdb = self.trees[myroot]["porttree"].dbapi
2661 pkgsettings = self.pkgsettings[myroot]
2662 final_db = self.mydbapi[myroot]
# Persistent per-root blocker cache keyed by cpv; stale entries (no
# longer-installed packages) are pruned at the end of the loop.
2664 blocker_cache = BlockerCache(myroot, vardb)
2665 stale_cache = set(blocker_cache)
2668 stale_cache.discard(cpv)
2669 pkg_in_graph = self.digraph.contains(pkg)
2671 # Check for masked installed packages. Only warn about
2672 # packages that are in the graph in order to avoid warning
2673 # about those that will be automatically uninstalled during
2674 # the merge process or by --depclean.
2676 if pkg_in_graph and not visible(pkgsettings, pkg):
2677 self._masked_installed.add(pkg)
2679 blocker_atoms = None
2685 self._blocker_parents.child_nodes(pkg))
2690 self._irrelevant_blockers.child_nodes(pkg))
2693 if blockers is not None:
2694 blockers = set(str(blocker.atom) \
2695 for blocker in blockers)
2697 # If this node has any blockers, create a "nomerge"
2698 # node for it so that they can be enforced.
2699 self.spinner.update()
# A cache entry is invalid when the COUNTER no longer matches the
# installed package's COUNTER.
2700 blocker_data = blocker_cache.get(cpv)
2701 if blocker_data is not None and \
2702 blocker_data.counter != long(pkg.metadata["COUNTER"]):
2705 # If blocker data from the graph is available, use
2706 # it to validate the cache and update the cache if
2708 if blocker_data is not None and \
2709 blockers is not None:
2710 if not blockers.symmetric_difference(
2711 blocker_data.atoms):
2715 if blocker_data is None and \
2716 blockers is not None:
2717 # Re-use the blockers from the graph.
2718 blocker_atoms = sorted(blockers)
2719 counter = long(pkg.metadata["COUNTER"])
2721 blocker_cache.BlockerData(counter, blocker_atoms)
2722 blocker_cache[pkg.cpv] = blocker_data
2726 blocker_atoms = blocker_data.atoms
2728 # Use aux_get() to trigger FakeVartree global
2729 # updates on *DEPEND when appropriate.
2730 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2731 # It is crucial to pass in final_db here in order to
2732 # optimize dep_check calls by eliminating atoms via
2733 # dep_wordreduce and dep_eval calls.
# Strict parsing is relaxed around dep_check() and restored below
# (module-global flag, same pattern as _select_atoms_highest_available).
2735 portage.dep._dep_check_strict = False
2737 success, atoms = portage.dep_check(depstr,
2738 final_db, pkgsettings, myuse=pkg.use.enabled,
2739 trees=self._graph_trees, myroot=myroot)
2740 except Exception, e:
2741 if isinstance(e, SystemExit):
2743 # This is helpful, for example, if a ValueError
2744 # is thrown from cpv_expand due to multiple
2745 # matches (this can happen if an atom lacks a
2747 show_invalid_depstring_notice(
2748 pkg, depstr, str(e))
2752 portage.dep._dep_check_strict = True
2754 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2755 if replacement_pkg and \
2756 replacement_pkg[0].operation == "merge":
2757 # This package is being replaced anyway, so
2758 # ignore invalid dependencies so as not to
2759 # annoy the user too much (otherwise they'd be
2760 # forced to manually unmerge it first).
2762 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blockers are the "!"-prefixed atoms surviving dep_check().
2764 blocker_atoms = [myatom for myatom in atoms \
2765 if myatom.startswith("!")]
2766 blocker_atoms.sort()
2767 counter = long(pkg.metadata["COUNTER"])
2768 blocker_cache[cpv] = \
2769 blocker_cache.BlockerData(counter, blocker_atoms)
2772 for atom in blocker_atoms:
2773 blocker = Blocker(atom=portage.dep.Atom(atom),
2774 eapi=pkg.metadata["EAPI"], root=myroot)
2775 self._blocker_parents.add(blocker, pkg)
2776 except portage.exception.InvalidAtom, e:
2777 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2778 show_invalid_depstring_notice(
2779 pkg, depstr, "Invalid Atom: %s" % (e,))
2781 for cpv in stale_cache:
2782 del blocker_cache[cpv]
2783 blocker_cache.flush()
2786 # Discard any "uninstall" tasks scheduled by previous calls
2787 # to this method, since those tasks may not make sense given
2788 # the current graph state.
2789 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
2790 if previous_uninstall_tasks:
2791 self._blocker_uninstalls = digraph()
2792 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against the initial
# (installed) and final (post-merge) package sets.
2794 for blocker in self._blocker_parents.leaf_nodes():
2795 self.spinner.update()
2796 root_config = self.roots[blocker.root]
2797 virtuals = root_config.settings.getvirtuals()
2798 myroot = blocker.root
2799 initial_db = self.trees[myroot]["vartree"].dbapi
2800 final_db = self.mydbapi[myroot]
2802 provider_virtual = False
2803 if blocker.cp in virtuals and \
2804 not self._have_new_virt(blocker.root, blocker.cp):
2805 provider_virtual = True
2807 # Use this to check PROVIDE for each matched package
2809 atom_set = InternalPackageSet(
2810 initial_atoms=[blocker.atom])
# Old-style virtual blockers expand to one atom per provider.
2812 if provider_virtual:
2814 for provider_entry in virtuals[blocker.cp]:
2816 portage.dep_getkey(provider_entry)
2817 atoms.append(blocker.atom.replace(
2818 blocker.cp, provider_cp))
2820 atoms = [blocker.atom]
2822 blocked_initial = set()
2824 for pkg in initial_db.match_pkgs(atom):
2825 if atom_set.findAtomForPackage(pkg):
2826 blocked_initial.add(pkg)
2828 blocked_final = set()
2830 for pkg in final_db.match_pkgs(atom):
2831 if atom_set.findAtomForPackage(pkg):
2832 blocked_final.add(pkg)
2834 if not blocked_initial and not blocked_final:
2835 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
2836 self._blocker_parents.remove(blocker)
2837 # Discard any parents that don't have any more blockers.
2838 for pkg in parent_pkgs:
2839 self._irrelevant_blockers.add(blocker, pkg)
2840 if not self._blocker_parents.child_nodes(pkg):
2841 self._blocker_parents.remove(pkg)
2843 for parent in self._blocker_parents.parent_nodes(blocker):
2844 unresolved_blocks = False
2845 depends_on_order = set()
2846 for pkg in blocked_initial:
2847 if pkg.slot_atom == parent.slot_atom:
2848 # TODO: Support blocks within slots in cases where it
2849 # might make sense. For example, a new version might
2850 # require that the old version be uninstalled at build
2853 if parent.installed:
2854 # Two currently installed packages conflict with
2855 # eachother. Ignore this case since the damage
2856 # is already done and this would be likely to
2857 # confuse users if displayed like a normal blocker.
2860 self._blocked_pkgs.add(pkg, blocker)
2862 if parent.operation == "merge":
2863 # Maybe the blocked package can be replaced or simply
2864 # unmerged to resolve this block.
2865 depends_on_order.add((pkg, parent))
2867 # None of the above blocker resolutions techniques apply,
2868 # so apparently this one is unresolvable.
2869 unresolved_blocks = True
2870 for pkg in blocked_final:
2871 if pkg.slot_atom == parent.slot_atom:
2872 # TODO: Support blocks within slots.
2874 if parent.operation == "nomerge" and \
2875 pkg.operation == "nomerge":
2876 # This blocker will be handled the next time that a
2877 # merge of either package is triggered.
2880 self._blocked_pkgs.add(pkg, blocker)
2882 # Maybe the blocking package can be
2883 # unmerged to resolve this block.
2884 if parent.operation == "merge" and pkg.installed:
2885 depends_on_order.add((pkg, parent))
2887 elif parent.operation == "nomerge":
2888 depends_on_order.add((parent, pkg))
2890 # None of the above blocker resolutions techniques apply,
2891 # so apparently this one is unresolvable.
2892 unresolved_blocks = True
2894 # Make sure we don't unmerge any package that have been pulled
2896 if not unresolved_blocks and depends_on_order:
2897 for inst_pkg, inst_task in depends_on_order:
2898 if self.digraph.contains(inst_pkg) and \
2899 self.digraph.parent_nodes(inst_pkg):
2900 unresolved_blocks = True
# Resolvable blocks: schedule uninstall tasks with hard ordering deps.
2903 if not unresolved_blocks and depends_on_order:
2904 for inst_pkg, inst_task in depends_on_order:
2905 uninst_task = Package(built=inst_pkg.built,
2906 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
2907 metadata=inst_pkg.metadata,
2908 operation="uninstall",
2909 root_config=inst_pkg.root_config,
2910 type_name=inst_pkg.type_name)
2911 self._pkg_cache[uninst_task] = uninst_task
2912 # Enforce correct merge order with a hard dep.
2913 self.digraph.addnode(uninst_task, inst_task,
2914 priority=BlockerDepPriority.instance)
2915 # Count references to this blocker so that it can be
2916 # invalidated after nodes referencing it have been
2918 self._blocker_uninstalls.addnode(uninst_task, blocker)
2919 if not unresolved_blocks and not depends_on_order:
2920 self._irrelevant_blockers.add(blocker, parent)
2921 self._blocker_parents.remove_edge(blocker, parent)
2922 if not self._blocker_parents.parent_nodes(blocker):
2923 self._blocker_parents.remove(blocker)
2924 if not self._blocker_parents.child_nodes(parent):
2925 self._blocker_parents.remove(parent)
2926 if unresolved_blocks:
2927 self._unsolvable_blockers.add(blocker, parent)
# Whether blocker conflicts are tolerable for this run: true when an
# option implies nothing is actually merged to the live system.
# NOTE(review): decimated listing -- the accumulator initialization and
# the return statement are missing from this view.
2931 def _accept_blocker_conflicts(self):
2933 for x in ("--buildpkgonly", "--fetchonly",
2934 "--fetch-all-uri", "--nodeps"):
2935 if x in self.myopts:
# NOTE(review): decimated listing -- docstring delimiters and the
# return statements inside cmp_merge_preference are missing.
2940 def _merge_order_bias(self, mygraph):
2942 For optimal leaf node selection, promote deep system runtime deps and
2943 order nodes from highest to lowest overall reference count.
# Pre-compute parent counts once so the comparator is O(1) per pair.
2947 for node in mygraph.order:
2948 node_info[node] = len(mygraph.parent_nodes(node))
2949 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
2951 def cmp_merge_preference(node1, node2):
# Uninstall operations sort relative to each other first.
2953 if node1.operation == 'uninstall':
2954 if node2.operation == 'uninstall':
2958 if node2.operation == 'uninstall':
2959 if node1.operation == 'uninstall':
# Then deep system runtime deps are promoted.
2963 node1_sys = node1 in deep_system_deps
2964 node2_sys = node2 in deep_system_deps
2965 if node1_sys != node2_sys:
# Finally, higher reference count sorts earlier.
2970 return node_info[node2] - node_info[node1]
# cmp_sort_key adapts the old-style cmp function for key-based sort.
2972 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, resolving conflicts and retrying
# serialization until it succeeds; the cache avoids recomputation.
# NOTE(review): decimated listing -- the try: line, the retry handling,
# the reverse branch, and the return are missing from this view. Also
# note the parameter shadows the `reversed` builtin (pre-existing).
2974 def altlist(self, reversed=False):
2976 while self._serialized_tasks_cache is None:
2977 self._resolve_conflicts()
2979 self._serialized_tasks_cache, self._scheduler_graph = \
2980 self._serialize_tasks()
2981 except self._serialize_tasks_retry:
# Hand back a copy so callers can't mutate the cache.
2984 retlist = self._serialized_tasks_cache[:]
# NOTE(review): decimated listing -- docstring delimiters and the
# altlist() call that populates _scheduler_graph are missing here.
2989 def schedulerGraph(self):
2991 The scheduler graph is identical to the normal one except that
2992 uninstall edges are reversed in specific cases that require
2993 conflicting packages to be temporarily installed simultaneously.
2994 This is intended for use by the Scheduler in it's parallelization
2995 logic. It ensures that temporary simultaneous installation of
2996 conflicting packages is avoided when appropriate (especially for
2997 !!atom blockers), but allowed in specific cases that require it.
2999 Note that this method calls break_refs() which alters the state of
3000 internal Package instances such that this depgraph instance should
3001 not be used to perform any more calculations.
3003 if self._scheduler_graph is None:
# break_refs() severs back-references so this depgraph can be GC'd.
3005 self.break_refs(self._scheduler_graph.order)
3006 return self._scheduler_graph
# NOTE(review): decimated listing -- docstring delimiters and the
# `for node in nodes:` loop header are missing from this view.
3008 def break_refs(self, nodes):
3010 Take a mergelist like that returned from self.altlist() and
3011 break any references that lead back to the depgraph. This is
3012 useful if you want to hold references to packages without
3013 also holding the depgraph on the heap.
3016 if hasattr(node, "root_config"):
3017 # The FakeVartree references the _package_cache which
3018 # references the depgraph. So that Package instances don't
3019 # hold the depgraph and FakeVartree on the heap, replace
3020 # the RootConfig that references the FakeVartree with the
3021 # original RootConfig instance which references the actual
3023 node.root_config = \
3024 self._trees_orig[node.root_config.root]["root_config"]
# Complete the graph, validate blockers, and process slot conflicts;
# any internal failure is surfaced as _unknown_internal_error.
3026 def _resolve_conflicts(self):
3027 if not self._complete_graph():
3028 raise self._unknown_internal_error()
3030 if not self.validate_blockers():
3031 raise self._unknown_internal_error()
# Slot collisions are only processed when some were recorded.
3033 if self._slot_collision_info:
3034 self._process_slot_conflicts()
3036 def _serialize_tasks(self):
# Purpose: flatten self.digraph into an ordered merge list (retlist) plus a
# scheduler_graph used later for parallel scheduling. The core is a loop
# that repeatedly selects "leaf" nodes under progressively looser edge-
# priority criteria, handling blockers and Uninstall tasks specially.
# NOTE(review): this is an elided listing — numerous original lines (loop
# headers, else: branches, return/continue/break statements, and several
# variable initializations such as asap_nodes/retlist/prefer_asap) are
# absent from the dump. Confirm against the full source before editing.
3038 if "--debug" in self.myopts:
3039 writemsg("\ndigraph:\n\n", noiselevel=-1)
3040 self.digraph.debug_print()
3041 writemsg("\n", noiselevel=-1)
# Keep an unpruned copy for the scheduler before merge-order pruning below.
3043 scheduler_graph = self.digraph.copy()
3045 if '--nodeps' in self.myopts:
3046 # Preserve the package order given on the command line.
3047 return ([node for node in scheduler_graph \
3048 if isinstance(node, Package) \
3049 and node.operation == 'merge'], scheduler_graph)
3051 mygraph=self.digraph.copy()
3052 # Prune "nomerge" root nodes if nothing depends on them, since
3053 # otherwise they slow down merge order calculation. Don't remove
3054 # non-root nodes since they help optimize merge order in some cases
3055 # such as revdep-rebuild.
3056 removed_nodes = set()
# Repeatedly strip non-Package / installed / onlydeps root nodes until a
# pass removes nothing (the enclosing loop header is elided from this dump).
3058 for node in mygraph.root_nodes():
3059 if not isinstance(node, Package) or \
3060 node.installed or node.onlydeps:
3061 removed_nodes.add(node)
3063 self.spinner.update()
3064 mygraph.difference_update(removed_nodes)
3065 if not removed_nodes:
3067 removed_nodes.clear()
3068 self._merge_order_bias(mygraph)
# cmp-style comparator used with cmp_sort_key() further down to bias merge
# order inside circular dependency groups.
3069 def cmp_circular_bias(n1, n2):
3071 RDEPEND is stronger than PDEPEND and this function
3072 measures such a strength bias within a circular
3073 dependency relationship.
3075 n1_n2_medium = n2 in mygraph.child_nodes(n1,
3076 ignore_priority=priority_range.ignore_medium_soft)
3077 n2_n1_medium = n1 in mygraph.child_nodes(n2,
3078 ignore_priority=priority_range.ignore_medium_soft)
3079 if n1_n2_medium == n2_n1_medium:
# --- blocker / uninstall bookkeeping ---
3084 myblocker_uninstalls = self._blocker_uninstalls.copy()
3086 # Contains uninstall tasks that have been scheduled to
3087 # occur after overlapping blockers have been installed.
3088 scheduled_uninstalls = set()
3089 # Contains any Uninstall tasks that have been ignored
3090 # in order to avoid the circular deps code path. These
3091 # correspond to blocker conflicts that could not be
3093 ignored_uninstall_tasks = set()
3094 have_uninstall_task = False
3095 complete = "complete" in self.myparams
3098 def get_nodes(**kwargs):
3100 Returns leaf nodes excluding Uninstall instances
3101 since those should be executed as late as possible.
3103 return [node for node in mygraph.leaf_nodes(**kwargs) \
3104 if isinstance(node, Package) and \
3105 (node.operation != "uninstall" or \
3106 node in scheduled_uninstalls)]
3108 # sys-apps/portage needs special treatment if ROOT="/"
3109 running_root = self._running_root.root
3110 from portage.const import PORTAGE_PACKAGE_ATOM
3111 runtime_deps = InternalPackageSet(
3112 initial_atoms=[PORTAGE_PACKAGE_ATOM])
3113 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
3114 PORTAGE_PACKAGE_ATOM)
3115 replacement_portage = self.mydbapi[running_root].match_pkgs(
3116 PORTAGE_PACKAGE_ATOM)
# Reduce the match lists to a single best package or None (the guarding
# if/else headers for running_portage are elided from this dump).
3119 running_portage = running_portage[0]
3121 running_portage = None
3123 if replacement_portage:
3124 replacement_portage = replacement_portage[0]
3126 replacement_portage = None
3128 if replacement_portage == running_portage:
3129 replacement_portage = None
3131 if replacement_portage is not None:
3132 # update from running_portage to replacement_portage asap
3133 asap_nodes.append(replacement_portage)
3135 if running_portage is not None:
# Collect portage's own runtime deps so they are never uninstalled below
# (try: header elided; the except clause follows).
3137 portage_rdepend = self._select_atoms_highest_available(
3138 running_root, running_portage.metadata["RDEPEND"],
3139 myuse=running_portage.use.enabled,
3140 parent=running_portage, strict=False)
3141 except portage.exception.InvalidDependString, e:
3142 portage.writemsg("!!! Invalid RDEPEND in " + \
3143 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3144 (running_root, running_portage.cpv, e), noiselevel=-1)
3146 portage_rdepend = []
3147 runtime_deps.update(atom for atom in portage_rdepend \
3148 if not atom.startswith("!"))
3150 def gather_deps(ignore_priority, mergeable_nodes,
3151 selected_nodes, node):
3153 Recursively gather a group of nodes that RDEPEND on
3154 eachother. This ensures that they are merged as a group
3155 and get their RDEPENDs satisfied as soon as possible.
# Returns a boolean: True when node (and transitively its children under
# ignore_priority) can all be merged together; early-return lines elided.
3157 if node in selected_nodes:
3159 if node not in mergeable_nodes:
3161 if node == replacement_portage and \
3162 mygraph.child_nodes(node,
3163 ignore_priority=priority_range.ignore_medium_soft):
3164 # Make sure that portage always has all of it's
3165 # RDEPENDs installed first.
3167 selected_nodes.add(node)
3168 for child in mygraph.child_nodes(node,
3169 ignore_priority=ignore_priority):
3170 if not gather_deps(ignore_priority,
3171 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally never ignore Blocker edges.
3175 def ignore_uninst_or_med(priority):
3176 if priority is BlockerDepPriority.instance:
3178 return priority_range.ignore_medium(priority)
3180 def ignore_uninst_or_med_soft(priority):
3181 if priority is BlockerDepPriority.instance:
3183 return priority_range.ignore_medium_soft(priority)
3185 tree_mode = "--tree" in self.myopts
3186 # Tracks whether or not the current iteration should prefer asap_nodes
3187 # if available. This is set to False when the previous iteration
3188 # failed to select any nodes. It is reset whenever nodes are
3189 # successfully selected.
3192 # Controls whether or not the current iteration should drop edges that
3193 # are "satisfied" by installed packages, in order to solve circular
3194 # dependencies. The deep runtime dependencies of installed packages are
3195 # not checked in this case (bug #199856), so it must be avoided
3196 # whenever possible.
3197 drop_satisfied = False
3199 # State of variables for successive iterations that loosen the
3200 # criteria for node selection.
3202 # iteration prefer_asap drop_satisfied
3207 # If no nodes are selected on the last iteration, it is due to
3208 # unresolved blockers or circular dependencies.
# --- main selection loop: drain mygraph one batch of nodes at a time ---
3210 while not mygraph.empty():
3211 self.spinner.update()
3212 selected_nodes = None
3213 ignore_priority = None
3214 if drop_satisfied or (prefer_asap and asap_nodes):
3215 priority_range = DepPrioritySatisfiedRange
3217 priority_range = DepPriorityNormalRange
3218 if prefer_asap and asap_nodes:
3219 # ASAP nodes are merged before their soft deps. Go ahead and
3220 # select root nodes here if necessary, since it's typical for
3221 # the parent to have been removed from the graph already.
3222 asap_nodes = [node for node in asap_nodes \
3223 if mygraph.contains(node)]
3224 for node in asap_nodes:
3225 if not mygraph.child_nodes(node,
3226 ignore_priority=priority_range.ignore_soft):
3227 selected_nodes = [node]
3228 asap_nodes.remove(node)
3230 if not selected_nodes and \
3231 not (prefer_asap and asap_nodes):
3232 for i in xrange(priority_range.NONE,
3233 priority_range.MEDIUM_SOFT + 1):
3234 ignore_priority = priority_range.ignore_priority[i]
3235 nodes = get_nodes(ignore_priority=ignore_priority)
3237 # If there is a mix of uninstall nodes with other
3238 # types, save the uninstall nodes for later since
3239 # sometimes a merge node will render an uninstall
3240 # node unnecessary (due to occupying the same slot),
3241 # and we want to avoid executing a separate uninstall
3242 # task in that case.
3244 good_uninstalls = []
3245 with_some_uninstalls_excluded = []
3247 if node.operation == "uninstall":
3248 slot_node = self.mydbapi[node.root
3249 ].match_pkgs(node.slot_atom)
3251 slot_node[0].operation == "merge":
3253 good_uninstalls.append(node)
3254 with_some_uninstalls_excluded.append(node)
3256 nodes = good_uninstalls
3257 elif with_some_uninstalls_excluded:
3258 nodes = with_some_uninstalls_excluded
3262 if ignore_priority is None and not tree_mode:
3263 # Greedily pop all of these nodes since no
3264 # relationship has been ignored. This optimization
3265 # destroys --tree output, so it's disabled in tree
3267 selected_nodes = nodes
3269 # For optimal merge order:
3270 # * Only pop one node.
3271 # * Removing a root node (node without a parent)
3272 # will not produce a leaf node, so avoid it.
3273 # * It's normal for a selected uninstall to be a
3274 # root node, so don't check them for parents.
3276 if node.operation == "uninstall" or \
3277 mygraph.parent_nodes(node):
3278 selected_nodes = [node]
# Fall back to grouping mutually-RDEPENDing nodes via gather_deps().
3284 if not selected_nodes:
3285 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
3287 mergeable_nodes = set(nodes)
3288 if prefer_asap and asap_nodes:
3290 for i in xrange(priority_range.SOFT,
3291 priority_range.MEDIUM_SOFT + 1):
3292 ignore_priority = priority_range.ignore_priority[i]
3294 if not mygraph.parent_nodes(node):
3296 selected_nodes = set()
3297 if gather_deps(ignore_priority,
3298 mergeable_nodes, selected_nodes, node):
3301 selected_nodes = None
3305 if prefer_asap and asap_nodes and not selected_nodes:
3306 # We failed to find any asap nodes to merge, so ignore
3307 # them for the next iteration.
3311 if selected_nodes and ignore_priority is not None:
3312 # Try to merge ignored medium_soft deps as soon as possible
3313 # if they're not satisfied by installed packages.
3314 for node in selected_nodes:
3315 children = set(mygraph.child_nodes(node))
3316 soft = children.difference(
3317 mygraph.child_nodes(node,
3318 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
3319 medium_soft = children.difference(
3320 mygraph.child_nodes(node,
3322 DepPrioritySatisfiedRange.ignore_medium_soft))
3323 medium_soft.difference_update(soft)
3324 for child in medium_soft:
3325 if child in selected_nodes:
3327 if child in asap_nodes:
3329 asap_nodes.append(child)
3331 if selected_nodes and len(selected_nodes) > 1:
3332 if not isinstance(selected_nodes, list):
3333 selected_nodes = list(selected_nodes)
# cmp_sort_key adapts the cmp-style comparator defined above for key= use.
3334 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
3336 if not selected_nodes and not myblocker_uninstalls.is_empty():
3337 # An Uninstall task needs to be executed in order to
3338 # avoid conflict if possible.
3341 priority_range = DepPrioritySatisfiedRange
3343 priority_range = DepPriorityNormalRange
3345 mergeable_nodes = get_nodes(
3346 ignore_priority=ignore_uninst_or_med)
3348 min_parent_deps = None
3350 for task in myblocker_uninstalls.leaf_nodes():
3351 # Do some sanity checks so that system or world packages
3352 # don't get uninstalled inappropriately here (only really
3353 # necessary when --complete-graph has not been enabled).
3355 if task in ignored_uninstall_tasks:
3358 if task in scheduled_uninstalls:
3359 # It's been scheduled but it hasn't
3360 # been executed yet due to dependence
3361 # on installation of blocking packages.
3364 root_config = self.roots[task.root]
3365 inst_pkg = self._pkg_cache[
3366 ("installed", task.root, task.cpv, "nomerge")]
3368 if self.digraph.contains(inst_pkg):
# Classify blockers on this task by EAPI: EAPI 0/1 "!" blockers get
# heuristic overlap treatment, "!!" blockers forbid temporary overlap.
3371 forbid_overlap = False
3372 heuristic_overlap = False
3373 for blocker in myblocker_uninstalls.parent_nodes(task):
3374 if blocker.eapi in ("0", "1"):
3375 heuristic_overlap = True
3376 elif blocker.atom.blocker.overlap.forbid:
3377 forbid_overlap = True
3379 if forbid_overlap and running_root == task.root:
3382 if heuristic_overlap and running_root == task.root:
3383 # Never uninstall sys-apps/portage or it's essential
3384 # dependencies, except through replacement.
3386 runtime_dep_atoms = \
3387 list(runtime_deps.iterAtomsForPackage(task))
3388 except portage.exception.InvalidDependString, e:
3389 portage.writemsg("!!! Invalid PROVIDE in " + \
3390 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3391 (task.root, task.cpv, e), noiselevel=-1)
3395 # Don't uninstall a runtime dep if it appears
3396 # to be the only suitable one installed.
3398 vardb = root_config.trees["vartree"].dbapi
3399 for atom in runtime_dep_atoms:
3400 other_version = None
3401 for pkg in vardb.match_pkgs(atom):
3402 if pkg.cpv == task.cpv and \
3403 pkg.metadata["COUNTER"] == \
3404 task.metadata["COUNTER"]:
3408 if other_version is None:
3414 # For packages in the system set, don't take
3415 # any chances. If the conflict can't be resolved
3416 # by a normal replacement operation then abort.
3419 for atom in root_config.sets[
3420 "system"].iterAtomsForPackage(task):
3423 except portage.exception.InvalidDependString, e:
3424 portage.writemsg("!!! Invalid PROVIDE in " + \
3425 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3426 (task.root, task.cpv, e), noiselevel=-1)
3432 # Note that the world check isn't always
3433 # necessary since self._complete_graph() will
3434 # add all packages from the system and world sets to the
3435 # graph. This just allows unresolved conflicts to be
3436 # detected as early as possible, which makes it possible
3437 # to avoid calling self._complete_graph() when it is
3438 # unnecessary due to blockers triggering an abortion.
3440 # For packages in the world set, go ahead an uninstall
3441 # when necessary, as long as the atom will be satisfied
3442 # in the final state.
3443 graph_db = self.mydbapi[task.root]
3446 for atom in root_config.sets[
3447 "world"].iterAtomsForPackage(task):
3449 for pkg in graph_db.match_pkgs(atom):
3456 self._blocked_world_pkgs[inst_pkg] = atom
3458 except portage.exception.InvalidDependString, e:
3459 portage.writemsg("!!! Invalid PROVIDE in " + \
3460 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3461 (task.root, task.cpv, e), noiselevel=-1)
3467 # Check the deps of parent nodes to ensure that
3468 # the chosen task produces a leaf node. Maybe
3469 # this can be optimized some more to make the
3470 # best possible choice, but the current algorithm
3471 # is simple and should be near optimal for most
3473 mergeable_parent = False
3475 for parent in mygraph.parent_nodes(task):
3476 parent_deps.update(mygraph.child_nodes(parent,
3477 ignore_priority=priority_range.ignore_medium_soft))
3478 if parent in mergeable_nodes and \
3479 gather_deps(ignore_uninst_or_med_soft,
3480 mergeable_nodes, set(), parent):
3481 mergeable_parent = True
3483 if not mergeable_parent:
# Pick the candidate whose parents have the fewest outstanding deps.
3486 parent_deps.remove(task)
3487 if min_parent_deps is None or \
3488 len(parent_deps) < min_parent_deps:
3489 min_parent_deps = len(parent_deps)
3492 if uninst_task is not None:
3493 # The uninstall is performed only after blocking
3494 # packages have been merged on top of it. File
3495 # collisions between blocking packages are detected
3496 # and removed from the list of files to be uninstalled.
3497 scheduled_uninstalls.add(uninst_task)
3498 parent_nodes = mygraph.parent_nodes(uninst_task)
3500 # Reverse the parent -> uninstall edges since we want
3501 # to do the uninstall after blocking packages have
3502 # been merged on top of it.
3503 mygraph.remove(uninst_task)
3504 for blocked_pkg in parent_nodes:
3505 mygraph.add(blocked_pkg, uninst_task,
3506 priority=BlockerDepPriority.instance)
3507 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
3508 scheduler_graph.add(blocked_pkg, uninst_task,
3509 priority=BlockerDepPriority.instance)
3511 # Reset the state variables for leaf node selection and
3512 # continue trying to select leaf nodes.
3514 drop_satisfied = False
3517 if not selected_nodes:
3518 # Only select root nodes as a last resort. This case should
3519 # only trigger when the graph is nearly empty and the only
3520 # remaining nodes are isolated (no parents or children). Since
3521 # the nodes must be isolated, ignore_priority is not needed.
3522 selected_nodes = get_nodes()
3524 if not selected_nodes and not drop_satisfied:
3525 drop_satisfied = True
3528 if not selected_nodes and not myblocker_uninstalls.is_empty():
3529 # If possible, drop an uninstall task here in order to avoid
3530 # the circular deps code path. The corresponding blocker will
3531 # still be counted as an unresolved conflict.
3533 for node in myblocker_uninstalls.leaf_nodes():
3535 mygraph.remove(node)
3540 ignored_uninstall_tasks.add(node)
3543 if uninst_task is not None:
3544 # Reset the state variables for leaf node selection and
3545 # continue trying to select leaf nodes.
3547 drop_satisfied = False
3550 if not selected_nodes:
3551 self._circular_deps_for_display = mygraph
3552 raise self._unknown_internal_error()
3554 # At this point, we've succeeded in selecting one or more nodes, so
3555 # reset state variables for leaf node selection.
3557 drop_satisfied = False
3559 mygraph.difference_update(selected_nodes)
# --- append selected nodes to the result, resolving solved blockers ---
3561 for node in selected_nodes:
3562 if isinstance(node, Package) and \
3563 node.operation == "nomerge":
3566 # Handle interactions between blockers
3567 # and uninstallation tasks.
3568 solved_blockers = set()
3570 if isinstance(node, Package) and \
3571 "uninstall" == node.operation:
3572 have_uninstall_task = True
3575 vardb = self.trees[node.root]["vartree"].dbapi
3576 previous_cpv = vardb.match(node.slot_atom)
3578 # The package will be replaced by this one, so remove
3579 # the corresponding Uninstall task if necessary.
3580 previous_cpv = previous_cpv[0]
3582 ("installed", node.root, previous_cpv, "uninstall")
3584 mygraph.remove(uninst_task)
3588 if uninst_task is not None and \
3589 uninst_task not in ignored_uninstall_tasks and \
3590 myblocker_uninstalls.contains(uninst_task):
3591 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
3592 myblocker_uninstalls.remove(uninst_task)
3593 # Discard any blockers that this Uninstall solves.
3594 for blocker in blocker_nodes:
3595 if not myblocker_uninstalls.child_nodes(blocker):
3596 myblocker_uninstalls.remove(blocker)
3597 solved_blockers.add(blocker)
3599 retlist.append(node)
3601 if (isinstance(node, Package) and \
3602 "uninstall" == node.operation) or \
3603 (uninst_task is not None and \
3604 uninst_task in scheduled_uninstalls):
3605 # Include satisfied blockers in the merge list
3606 # since the user might be interested and also
3607 # it serves as an indicator that blocking packages
3608 # will be temporarily installed simultaneously.
3609 for blocker in solved_blockers:
3610 retlist.append(Blocker(atom=blocker.atom,
3611 root=blocker.root, eapi=blocker.eapi,
# --- post-loop: surface unsolvable blockers and possibly retry/abort ---
3614 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
3615 for node in myblocker_uninstalls.root_nodes():
3616 unsolvable_blockers.add(node)
3618 for blocker in unsolvable_blockers:
3619 retlist.append(blocker)
3621 # If any Uninstall tasks need to be executed in order
3622 # to avoid a conflict, complete the graph with any
3623 # dependencies that may have been initially
3624 # neglected (to ensure that unsafe Uninstall tasks
3625 # are properly identified and blocked from execution).
3626 if have_uninstall_task and \
3628 not unsolvable_blockers:
3629 self.myparams.add("complete")
3630 raise self._serialize_tasks_retry("")
3632 if unsolvable_blockers and \
3633 not self._accept_blocker_conflicts():
3634 self._unsatisfied_blockers_for_display = unsolvable_blockers
3635 self._serialized_tasks_cache = retlist[:]
3636 self._scheduler_graph = scheduler_graph
3637 raise self._unknown_internal_error()
3639 if self._slot_collision_info and \
3640 not self._accept_blocker_conflicts():
3641 self._serialized_tasks_cache = retlist[:]
3642 self._scheduler_graph = scheduler_graph
3643 raise self._unknown_internal_error()
# Returns (ordered task list, scheduler graph).
3645 return retlist, scheduler_graph
3647 def _show_circular_deps(self, mygraph):
# Purpose: report an unresolvable circular-dependency situation. Prunes
# cycle-irrelevant root nodes, renders the remaining nodes in --tree mode
# via self.display(), then prints the raw graph and advice to stderr-style
# output via portage.writemsg().
# NOTE(review): elided listing — the loop header around the root-node
# pruning, the display_order initialization, and parts of the leaf-node
# fallback are missing from this dump; confirm against the full source.
3648 # No leaf nodes are available, so we have a circular
3649 # dependency panic situation. Reduce the noise level to a
3650 # minimum via repeated elimination of root nodes since they
3651 # have no parents and thus can not be part of a cycle.
3653 root_nodes = mygraph.root_nodes(
3654 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
3657 mygraph.difference_update(root_nodes)
3658 # Display the USE flags that are enabled on nodes that are part
3659 # of dependency cycles in case that helps the user decide to
3660 # disable some of them.
3662 tempgraph = mygraph.copy()
3663 while not tempgraph.empty():
3664 nodes = tempgraph.leaf_nodes()
# Fallback when no leaf exists: take the first node in insertion order.
3666 node = tempgraph.order[0]
3669 display_order.append(node)
3670 tempgraph.remove(node)
3671 display_order.reverse()
# Force verbose tree rendering for the cycle display.
3672 self.myopts.pop("--quiet", None)
3673 self.myopts.pop("--verbose", None)
3674 self.myopts["--tree"] = True
3675 portage.writemsg("\n\n", noiselevel=-1)
3676 self.display(display_order)
3677 prefix = colorize("BAD", " * ")
3678 portage.writemsg("\n", noiselevel=-1)
3679 portage.writemsg(prefix + "Error: circular dependencies:\n",
3681 portage.writemsg("\n", noiselevel=-1)
3682 mygraph.debug_print()
3683 portage.writemsg("\n", noiselevel=-1)
3684 portage.writemsg(prefix + "Note that circular dependencies " + \
3685 "can often be avoided by temporarily\n", noiselevel=-1)
3686 portage.writemsg(prefix + "disabling USE flags that trigger " + \
3687 "optional dependencies.\n", noiselevel=-1)
3689 def _show_merge_list(self):
# Display the cached, serialized merge list — unless it is the exact list
# (or its reversal) that was already displayed, in which case do nothing.
# With --tree the list is shown in reverse order.
3690 if self._serialized_tasks_cache is not None and \
3691 not (self._displayed_list and \
3692 (self._displayed_list == self._serialized_tasks_cache or \
3693 self._displayed_list == \
3694 list(reversed(self._serialized_tasks_cache)))):
# Copy before reversing so the cache itself is left untouched.
3695 display_list = self._serialized_tasks_cache[:]
3696 if "--tree" in self.myopts:
3697 display_list.reverse()
3698 self.display(display_list)
3700 def _show_unsatisfied_blockers(self, blockers):
# Purpose: after showing the merge list, explain which packages conflict
# and which parents pulled each one in, pruning noise so the report stays
# readable, then write the result to sys.stderr.
# NOTE(review): elided listing — initializations of conflict_pkgs,
# pruned_pkgs, msg, indent, pruned_list, and max_parents are missing from
# this dump, as are some loop/else lines; confirm against the full source.
3701 self._show_merge_list()
3702 msg = "Error: The above package list contains " + \
3703 "packages which cannot be installed " + \
3704 "at the same time on the same system."
3705 prefix = colorize("BAD", " * ")
3706 from textwrap import wrap
3707 portage.writemsg("\n", noiselevel=-1)
3708 for line in wrap(msg, 70):
3709 portage.writemsg(prefix + line + "\n", noiselevel=-1)
3711 # Display the conflicting packages along with the packages
3712 # that pulled them in. This is helpful for troubleshooting
3713 # cases in which blockers don't solve automatically and
3714 # the reasons are not apparent from the normal merge list
3718 for blocker in blockers:
3719 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
3720 self._blocker_parents.parent_nodes(blocker)):
3721 parent_atoms = self._parent_atoms.get(pkg)
3722 if not parent_atoms:
# A package with no recorded parent atoms may still be pinned by the
# world set; synthesize a "@world" parent entry in that case.
3723 atom = self._blocked_world_pkgs.get(pkg)
3724 if atom is not None:
3725 parent_atoms = set([("@world", atom)])
3727 conflict_pkgs[pkg] = parent_atoms
3730 # Reduce noise by pruning packages that are only
3731 # pulled in by other conflict packages.
3733 for pkg, parent_atoms in conflict_pkgs.iteritems():
3734 relevant_parent = False
3735 for parent, atom in parent_atoms:
3736 if parent not in conflict_pkgs:
3737 relevant_parent = True
3739 if not relevant_parent:
3740 pruned_pkgs.add(pkg)
3741 for pkg in pruned_pkgs:
3742 del conflict_pkgs[pkg]
3748 # Max number of parents shown, to avoid flooding the display.
3750 for pkg, parent_atoms in conflict_pkgs.iteritems():
3754 # Prefer packages that are not directly involved in a conflict.
3755 for parent_atom in parent_atoms:
3756 if len(pruned_list) >= max_parents:
3758 parent, atom = parent_atom
3759 if parent not in conflict_pkgs:
3760 pruned_list.add(parent_atom)
# Second pass fills remaining display slots with any parents at all.
3762 for parent_atom in parent_atoms:
3763 if len(pruned_list) >= max_parents:
3765 pruned_list.add(parent_atom)
3767 omitted_parents = len(parent_atoms) - len(pruned_list)
3768 msg.append(indent + "%s pulled in by\n" % pkg)
3770 for parent_atom in pruned_list:
3771 parent, atom = parent_atom
3772 msg.append(2*indent)
3773 if isinstance(parent,
3774 (PackageArg, AtomArg)):
3775 # For PackageArg and AtomArg types, it's
3776 # redundant to display the atom attribute.
3777 msg.append(str(parent))
3779 # Display the specific atom from SetArg or
3781 msg.append("%s required by %s" % (atom, parent))
3785 msg.append(2*indent)
3786 msg.append("(and %d more)\n" % omitted_parents)
3790 sys.stderr.write("".join(msg))
3793 if "--quiet" not in self.myopts:
3794 show_blocker_docs_link()
3796 def display(self, mylist, favorites=[], verbosity=None):
3798 # This is used to prevent display_problems() from
3799 # redundantly displaying this exact same merge list
3800 # again via _show_merge_list().
3801 self._displayed_list = mylist
3803 if verbosity is None:
3804 verbosity = ("--quiet" in self.myopts and 1 or \
3805 "--verbose" in self.myopts and 3 or 2)
3806 favorites_set = InternalPackageSet(favorites)
3807 oneshot = "--oneshot" in self.myopts or \
3808 "--onlydeps" in self.myopts
3809 columns = "--columns" in self.myopts
3814 counters = PackageCounters()
3816 if verbosity == 1 and "--verbose" not in self.myopts:
3817 def create_use_string(*args):
3820 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3822 is_new, reinst_flags,
3823 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
3824 alphabetical=("--alphabetical" in self.myopts)):
3832 cur_iuse = set(cur_iuse)
3833 enabled_flags = cur_iuse.intersection(cur_use)
3834 removed_iuse = set(old_iuse).difference(cur_iuse)
3835 any_iuse = cur_iuse.union(old_iuse)
3836 any_iuse = list(any_iuse)
3838 for flag in any_iuse:
3841 reinst_flag = reinst_flags and flag in reinst_flags
3842 if flag in enabled_flags:
3844 if is_new or flag in old_use and \
3845 (all_flags or reinst_flag):
3846 flag_str = red(flag)
3847 elif flag not in old_iuse:
3848 flag_str = yellow(flag) + "%*"
3849 elif flag not in old_use:
3850 flag_str = green(flag) + "*"
3851 elif flag in removed_iuse:
3852 if all_flags or reinst_flag:
3853 flag_str = yellow("-" + flag) + "%"
3856 flag_str = "(" + flag_str + ")"
3857 removed.append(flag_str)
3860 if is_new or flag in old_iuse and \
3861 flag not in old_use and \
3862 (all_flags or reinst_flag):
3863 flag_str = blue("-" + flag)
3864 elif flag not in old_iuse:
3865 flag_str = yellow("-" + flag)
3866 if flag not in iuse_forced:
3868 elif flag in old_use:
3869 flag_str = green("-" + flag) + "*"
3871 if flag in iuse_forced:
3872 flag_str = "(" + flag_str + ")"
3874 enabled.append(flag_str)
3876 disabled.append(flag_str)
3879 ret = " ".join(enabled)
3881 ret = " ".join(enabled + disabled + removed)
3883 ret = '%s="%s" ' % (name, ret)
3886 repo_display = RepoDisplay(self.roots)
3890 mygraph = self.digraph.copy()
3892 # If there are any Uninstall instances, add the corresponding
3893 # blockers to the digraph (useful for --tree display).
3895 executed_uninstalls = set(node for node in mylist \
3896 if isinstance(node, Package) and node.operation == "unmerge")
3898 for uninstall in self._blocker_uninstalls.leaf_nodes():
3899 uninstall_parents = \
3900 self._blocker_uninstalls.parent_nodes(uninstall)
3901 if not uninstall_parents:
3904 # Remove the corresponding "nomerge" node and substitute
3905 # the Uninstall node.
3906 inst_pkg = self._pkg_cache[
3907 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
3909 mygraph.remove(inst_pkg)
3914 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
3916 inst_pkg_blockers = []
3918 # Break the Package -> Uninstall edges.
3919 mygraph.remove(uninstall)
3921 # Resolution of a package's blockers
3922 # depend on it's own uninstallation.
3923 for blocker in inst_pkg_blockers:
3924 mygraph.add(uninstall, blocker)
3926 # Expand Package -> Uninstall edges into
3927 # Package -> Blocker -> Uninstall edges.
3928 for blocker in uninstall_parents:
3929 mygraph.add(uninstall, blocker)
3930 for parent in self._blocker_parents.parent_nodes(blocker):
3931 if parent != inst_pkg:
3932 mygraph.add(blocker, parent)
3934 # If the uninstall task did not need to be executed because
3935 # of an upgrade, display Blocker -> Upgrade edges since the
3936 # corresponding Blocker -> Uninstall edges will not be shown.
3938 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
3939 if upgrade_node is not None and \
3940 uninstall not in executed_uninstalls:
3941 for blocker in uninstall_parents:
3942 mygraph.add(upgrade_node, blocker)
3944 unsatisfied_blockers = []
3949 if isinstance(x, Blocker) and not x.satisfied:
3950 unsatisfied_blockers.append(x)
3953 if "--tree" in self.myopts:
3954 depth = len(tree_nodes)
3955 while depth and graph_key not in \
3956 mygraph.child_nodes(tree_nodes[depth-1]):
3959 tree_nodes = tree_nodes[:depth]
3960 tree_nodes.append(graph_key)
3961 display_list.append((x, depth, True))
3962 shown_edges.add((graph_key, tree_nodes[depth-1]))
3964 traversed_nodes = set() # prevent endless circles
3965 traversed_nodes.add(graph_key)
3966 def add_parents(current_node, ordered):
3968 # Do not traverse to parents if this node is an
3969 # an argument or a direct member of a set that has
3970 # been specified as an argument (system or world).
3971 if current_node not in self._set_nodes:
3972 parent_nodes = mygraph.parent_nodes(current_node)
3974 child_nodes = set(mygraph.child_nodes(current_node))
3975 selected_parent = None
3976 # First, try to avoid a direct cycle.
3977 for node in parent_nodes:
3978 if not isinstance(node, (Blocker, Package)):
3980 if node not in traversed_nodes and \
3981 node not in child_nodes:
3982 edge = (current_node, node)
3983 if edge in shown_edges:
3985 selected_parent = node
3987 if not selected_parent:
3988 # A direct cycle is unavoidable.
3989 for node in parent_nodes:
3990 if not isinstance(node, (Blocker, Package)):
3992 if node not in traversed_nodes:
3993 edge = (current_node, node)
3994 if edge in shown_edges:
3996 selected_parent = node
3999 shown_edges.add((current_node, selected_parent))
4000 traversed_nodes.add(selected_parent)
4001 add_parents(selected_parent, False)
4002 display_list.append((current_node,
4003 len(tree_nodes), ordered))
4004 tree_nodes.append(current_node)
4006 add_parents(graph_key, True)
4008 display_list.append((x, depth, True))
4009 mylist = display_list
4010 for x in unsatisfied_blockers:
4011 mylist.append((x, 0, True))
4013 last_merge_depth = 0
4014 for i in xrange(len(mylist)-1,-1,-1):
4015 graph_key, depth, ordered = mylist[i]
4016 if not ordered and depth == 0 and i > 0 \
4017 and graph_key == mylist[i-1][0] and \
4018 mylist[i-1][1] == 0:
4019 # An ordered node got a consecutive duplicate when the tree was
4023 if ordered and graph_key[-1] != "nomerge":
4024 last_merge_depth = depth
4026 if depth >= last_merge_depth or \
4027 i < len(mylist) - 1 and \
4028 depth >= mylist[i+1][1]:
4031 from portage import flatten
4032 from portage.dep import use_reduce, paren_reduce
4033 # files to fetch list - avoids counting a same file twice
4034 # in size display (verbose mode)
4037 # Use this set to detect when all the "repoadd" strings are "[0]"
4038 # and disable the entire repo display in this case.
4041 for mylist_index in xrange(len(mylist)):
4042 x, depth, ordered = mylist[mylist_index]
4046 portdb = self.trees[myroot]["porttree"].dbapi
4047 bindb = self.trees[myroot]["bintree"].dbapi
4048 vardb = self.trees[myroot]["vartree"].dbapi
4049 vartree = self.trees[myroot]["vartree"]
4050 pkgsettings = self.pkgsettings[myroot]
4053 indent = " " * depth
4055 if isinstance(x, Blocker):
4057 blocker_style = "PKG_BLOCKER_SATISFIED"
4058 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4060 blocker_style = "PKG_BLOCKER"
4061 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4063 counters.blocks += 1
4065 counters.blocks_satisfied += 1
4066 resolved = portage.key_expand(
4067 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4068 if "--columns" in self.myopts and "--quiet" in self.myopts:
4069 addl += " " + colorize(blocker_style, resolved)
4071 addl = "[%s %s] %s%s" % \
4072 (colorize(blocker_style, "blocks"),
4073 addl, indent, colorize(blocker_style, resolved))
4074 block_parents = self._blocker_parents.parent_nodes(x)
4075 block_parents = set([pnode[2] for pnode in block_parents])
4076 block_parents = ", ".join(block_parents)
4078 addl += colorize(blocker_style,
4079 " (\"%s\" is blocking %s)") % \
4080 (str(x.atom).lstrip("!"), block_parents)
4082 addl += colorize(blocker_style,
4083 " (is blocking %s)") % block_parents
4084 if isinstance(x, Blocker) and x.satisfied:
4089 blockers.append(addl)
4092 pkg_merge = ordered and pkg_status == "merge"
4093 if not pkg_merge and pkg_status == "merge":
4094 pkg_status = "nomerge"
4095 built = pkg_type != "ebuild"
4096 installed = pkg_type == "installed"
4098 metadata = pkg.metadata
4100 repo_name = metadata["repository"]
4101 if pkg_type == "ebuild":
4102 ebuild_path = portdb.findname(pkg_key)
4103 if not ebuild_path: # shouldn't happen
4104 raise portage.exception.PackageNotFound(pkg_key)
4105 repo_path_real = os.path.dirname(os.path.dirname(
4106 os.path.dirname(ebuild_path)))
4108 repo_path_real = portdb.getRepositoryPath(repo_name)
4109 pkg_use = list(pkg.use.enabled)
4111 restrict = flatten(use_reduce(paren_reduce(
4112 pkg.metadata["RESTRICT"]), uselist=pkg_use))
4113 except portage.exception.InvalidDependString, e:
4114 if not pkg.installed:
4115 show_invalid_depstring_notice(x,
4116 pkg.metadata["RESTRICT"], str(e))
4120 if "ebuild" == pkg_type and x[3] != "nomerge" and \
4121 "fetch" in restrict:
4124 counters.restrict_fetch += 1
4125 if portdb.fetch_check(pkg_key, pkg_use):
4128 counters.restrict_fetch_satisfied += 1
4130 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
4131 #param is used for -u, where you still *do* want to see when something is being upgraded.
4134 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4135 if vardb.cpv_exists(pkg_key):
4136 addl=" "+yellow("R")+fetch+" "
4139 counters.reinst += 1
4140 elif pkg_status == "uninstall":
4141 counters.uninst += 1
4142 # filter out old-style virtual matches
4143 elif installed_versions and \
4144 portage.cpv_getkey(installed_versions[0]) == \
4145 portage.cpv_getkey(pkg_key):
4146 myinslotlist = vardb.match(pkg.slot_atom)
4147 # If this is the first install of a new-style virtual, we
4148 # need to filter out old-style virtual matches.
4149 if myinslotlist and \
4150 portage.cpv_getkey(myinslotlist[0]) != \
4151 portage.cpv_getkey(pkg_key):
4154 myoldbest = myinslotlist[:]
4156 if not portage.dep.cpvequal(pkg_key,
4157 portage.best([pkg_key] + myoldbest)):
4159 addl += turquoise("U")+blue("D")
4161 counters.downgrades += 1
4164 addl += turquoise("U") + " "
4166 counters.upgrades += 1
4168 # New slot, mark it new.
4169 addl = " " + green("NS") + fetch + " "
4170 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4172 counters.newslot += 1
4174 if "--changelog" in self.myopts:
4175 inst_matches = vardb.match(pkg.slot_atom)
4177 changelogs.extend(self.calc_changelog(
4178 portdb.findname(pkg_key),
4179 inst_matches[0], pkg_key))
4181 addl = " " + green("N") + " " + fetch + " "
4190 forced_flags = set()
4191 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4192 forced_flags.update(pkgsettings.useforce)
4193 forced_flags.update(pkgsettings.usemask)
4195 cur_use = [flag for flag in pkg.use.enabled \
4196 if flag in pkg.iuse.all]
4197 cur_iuse = sorted(pkg.iuse.all)
4199 if myoldbest and myinslotlist:
4200 previous_cpv = myoldbest[0]
4202 previous_cpv = pkg.cpv
4203 if vardb.cpv_exists(previous_cpv):
4204 old_iuse, old_use = vardb.aux_get(
4205 previous_cpv, ["IUSE", "USE"])
4206 old_iuse = list(set(
4207 filter_iuse_defaults(old_iuse.split())))
4209 old_use = old_use.split()
4216 old_use = [flag for flag in old_use if flag in old_iuse]
4218 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4220 use_expand.reverse()
4221 use_expand_hidden = \
4222 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
4224 def map_to_use_expand(myvals, forcedFlags=False,
4228 for exp in use_expand:
4231 for val in myvals[:]:
4232 if val.startswith(exp.lower()+"_"):
4233 if val in forced_flags:
4234 forced[exp].add(val[len(exp)+1:])
4235 ret[exp].append(val[len(exp)+1:])
4238 forced["USE"] = [val for val in myvals \
4239 if val in forced_flags]
4241 for exp in use_expand_hidden:
4247 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4248 # are the only thing that triggered reinstallation.
4249 reinst_flags_map = {}
4250 reinstall_for_flags = self._reinstall_nodes.get(pkg)
4251 reinst_expand_map = None
4252 if reinstall_for_flags:
4253 reinst_flags_map = map_to_use_expand(
4254 list(reinstall_for_flags), removeHidden=False)
4255 for k in list(reinst_flags_map):
4256 if not reinst_flags_map[k]:
4257 del reinst_flags_map[k]
4258 if not reinst_flags_map.get("USE"):
4259 reinst_expand_map = reinst_flags_map.copy()
4260 reinst_expand_map.pop("USE", None)
4261 if reinst_expand_map and \
4262 not set(reinst_expand_map).difference(
4264 use_expand_hidden = \
4265 set(use_expand_hidden).difference(
4268 cur_iuse_map, iuse_forced = \
4269 map_to_use_expand(cur_iuse, forcedFlags=True)
4270 cur_use_map = map_to_use_expand(cur_use)
4271 old_iuse_map = map_to_use_expand(old_iuse)
4272 old_use_map = map_to_use_expand(old_use)
4275 use_expand.insert(0, "USE")
4277 for key in use_expand:
4278 if key in use_expand_hidden:
4280 verboseadd += create_use_string(key.upper(),
4281 cur_iuse_map[key], iuse_forced[key],
4282 cur_use_map[key], old_iuse_map[key],
4283 old_use_map[key], is_new,
4284 reinst_flags_map.get(key))
4289 if pkg_type == "ebuild" and pkg_merge:
4291 myfilesdict = portdb.getfetchsizes(pkg_key,
4292 useflags=pkg_use, debug=self.edebug)
4293 except portage.exception.InvalidDependString, e:
4294 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4295 show_invalid_depstring_notice(x, src_uri, str(e))
4298 if myfilesdict is None:
4299 myfilesdict="[empty/missing/bad digest]"
4301 for myfetchfile in myfilesdict:
4302 if myfetchfile not in myfetchlist:
4303 mysize+=myfilesdict[myfetchfile]
4304 myfetchlist.append(myfetchfile)
4306 counters.totalsize += mysize
4307 verboseadd += format_size(mysize)
4310 # assign index for a previous version in the same slot
4311 has_previous = False
4312 repo_name_prev = None
4313 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4315 slot_matches = vardb.match(slot_atom)
4318 repo_name_prev = vardb.aux_get(slot_matches[0],
4321 # now use the data to generate output
4322 if pkg.installed or not has_previous:
4323 repoadd = repo_display.repoStr(repo_path_real)
4325 repo_path_prev = None
4327 repo_path_prev = portdb.getRepositoryPath(
4329 if repo_path_prev == repo_path_real:
4330 repoadd = repo_display.repoStr(repo_path_real)
4332 repoadd = "%s=>%s" % (
4333 repo_display.repoStr(repo_path_prev),
4334 repo_display.repoStr(repo_path_real))
4336 repoadd_set.add(repoadd)
4338 xs = [portage.cpv_getkey(pkg_key)] + \
4339 list(portage.catpkgsplit(pkg_key)[2:])
4346 if "COLUMNWIDTH" in self.settings:
4348 mywidth = int(self.settings["COLUMNWIDTH"])
4349 except ValueError, e:
4350 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4352 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4353 self.settings["COLUMNWIDTH"], noiselevel=-1)
4355 oldlp = mywidth - 30
4358 # Convert myoldbest from a list to a string.
4362 for pos, key in enumerate(myoldbest):
4363 key = portage.catpkgsplit(key)[2] + \
4364 "-" + portage.catpkgsplit(key)[3]
4365 if key[-3:] == "-r0":
4367 myoldbest[pos] = key
4368 myoldbest = blue("["+", ".join(myoldbest)+"]")
4371 root_config = self.roots[myroot]
4372 system_set = root_config.sets["system"]
4373 world_set = root_config.sets["world"]
4378 pkg_system = system_set.findAtomForPackage(pkg)
4379 pkg_world = world_set.findAtomForPackage(pkg)
4380 if not (oneshot or pkg_world) and \
4381 myroot == self.target_root and \
4382 favorites_set.findAtomForPackage(pkg):
4383 # Maybe it will be added to world now.
4384 if create_world_atom(pkg, favorites_set, root_config):
4386 except portage.exception.InvalidDependString:
4387 # This is reported elsewhere if relevant.
4390 def pkgprint(pkg_str):
4393 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4395 return colorize("PKG_MERGE_WORLD", pkg_str)
4397 return colorize("PKG_MERGE", pkg_str)
4398 elif pkg_status == "uninstall":
4399 return colorize("PKG_UNINSTALL", pkg_str)
4402 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4404 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4406 return colorize("PKG_NOMERGE", pkg_str)
4409 properties = flatten(use_reduce(paren_reduce(
4410 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
4411 except portage.exception.InvalidDependString, e:
4412 if not pkg.installed:
4413 show_invalid_depstring_notice(pkg,
4414 pkg.metadata["PROPERTIES"], str(e))
4418 interactive = "interactive" in properties
4419 if interactive and pkg.operation == "merge":
4420 addl = colorize("WARN", "I") + addl[1:]
4422 counters.interactive += 1
4427 if "--columns" in self.myopts:
4428 if "--quiet" in self.myopts:
4429 myprint=addl+" "+indent+pkgprint(pkg_cp)
4430 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4431 myprint=myprint+myoldbest
4432 myprint=myprint+darkgreen("to "+x[1])
4436 myprint = "[%s] %s%s" % \
4437 (pkgprint(pkg_status.ljust(13)),
4438 indent, pkgprint(pkg.cp))
4440 myprint = "[%s %s] %s%s" % \
4441 (pkgprint(pkg.type_name), addl,
4442 indent, pkgprint(pkg.cp))
4443 if (newlp-nc_len(myprint)) > 0:
4444 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4445 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4446 if (oldlp-nc_len(myprint)) > 0:
4447 myprint=myprint+" "*(oldlp-nc_len(myprint))
4448 myprint=myprint+myoldbest
4449 myprint += darkgreen("to " + pkg.root)
4452 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4454 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4455 myprint += indent + pkgprint(pkg_key) + " " + \
4456 myoldbest + darkgreen("to " + myroot)
4458 if "--columns" in self.myopts:
4459 if "--quiet" in self.myopts:
4460 myprint=addl+" "+indent+pkgprint(pkg_cp)
4461 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4462 myprint=myprint+myoldbest
4466 myprint = "[%s] %s%s" % \
4467 (pkgprint(pkg_status.ljust(13)),
4468 indent, pkgprint(pkg.cp))
4470 myprint = "[%s %s] %s%s" % \
4471 (pkgprint(pkg.type_name), addl,
4472 indent, pkgprint(pkg.cp))
4473 if (newlp-nc_len(myprint)) > 0:
4474 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4475 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4476 if (oldlp-nc_len(myprint)) > 0:
4477 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4478 myprint += myoldbest
4481 myprint = "[%s] %s%s %s" % \
4482 (pkgprint(pkg_status.ljust(13)),
4483 indent, pkgprint(pkg.cpv),
4486 myprint = "[%s %s] %s%s %s" % \
4487 (pkgprint(pkg_type), addl, indent,
4488 pkgprint(pkg.cpv), myoldbest)
4490 if columns and pkg.operation == "uninstall":
4492 p.append((myprint, verboseadd, repoadd))
4494 if "--tree" not in self.myopts and \
4495 "--quiet" not in self.myopts and \
4496 not self._opts_no_restart.intersection(self.myopts) and \
4497 pkg.root == self._running_root.root and \
4498 portage.match_from_list(
4499 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4500 not vardb.cpv_exists(pkg.cpv) and \
4501 "--quiet" not in self.myopts:
4502 if mylist_index < len(mylist) - 1:
4503 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4504 p.append(colorize("WARN", " then resume the merge."))
4507 show_repos = repoadd_set and repoadd_set != set(["0"])
4510 if isinstance(x, basestring):
4511 out.write("%s\n" % (x,))
4514 myprint, verboseadd, repoadd = x
4517 myprint += " " + verboseadd
4519 if show_repos and repoadd:
4520 myprint += " " + teal("[%s]" % repoadd)
4522 out.write("%s\n" % (myprint,))
4531 sys.stdout.write(str(repo_display))
4533 if "--changelog" in self.myopts:
4535 for revision,text in changelogs:
4536 print bold('*'+revision)
4537 sys.stdout.write(text)
# Public entry point: report dependency-graph problems (slot collisions,
# missing world atoms, masked installed packages, unsatisfied deps).
# NOTE(review): the retained original line numbering shows gaps -- the
# docstring delimiters and the code that saves and (presumably in a
# try/finally) restores sys.stdout around the redirect below are elided
# from this chunk.
4542 def display_problems(self):
4544 Display problems with the dependency graph such as slot collisions.
4545 This is called internally by display() to show the problems _after_
4546 the merge list where it is most likely to be seen, but if display()
4547 is not going to be called then this method should be called explicitly
4548 to ensure that the user is notified of problems with the graph.
4550 All output goes to stderr, except for unsatisfied dependencies which
4551 go to stdout for parsing by programs such as autounmask.
4554 # Note that show_masked_packages() sends its output to
4555 # stdout, and some programs such as autounmask parse the
4556 # output in cases when emerge bails out. However, when
4557 # show_masked_packages() is called for installed packages
4558 # here, the message is a warning that is more appropriate
4559 # to send to stderr, so temporarily redirect stdout to
4560 # stderr. TODO: Fix output code so there's a cleaner way
4561 # to redirect everything to stderr.
4566 sys.stdout = sys.stderr
4567 self._display_problems()
# Unsatisfied deps are printed last, on stdout (after the redirect has
# presumably been undone in elided lines), for machine parsing.
4573 # This goes to stdout for parsing by programs like autounmask.
4574 for pargs, kwargs in self._unsatisfied_deps_for_display:
4575 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): writes each category of graph
# problem to stderr. NOTE(review): interior lines are elided from this
# chunk (visible as jumps in the retained line numbering), e.g. several
# else/continue branches and loop headers.
4577 def _display_problems(self):
# Circular dependencies, if any were recorded during graph creation.
4578 if self._circular_deps_for_display is not None:
4579 self._show_circular_deps(
4580 self._circular_deps_for_display)
4582 # The user is only notified of a slot conflict if
4583 # there are no unresolvable blocker conflicts.
4584 if self._unsatisfied_blockers_for_display is not None:
4585 self._show_unsatisfied_blockers(
4586 self._unsatisfied_blockers_for_display)
4588 self._show_slot_collision_notice()
4590 # TODO: Add generic support for "set problem" handlers so that
4591 # the below warnings aren't special cases for world only.
4593 if self._missing_args:
4594 world_problems = False
4595 if "world" in self._sets:
4596 # Filter out indirect members of world (from nested sets)
4597 # since only direct members of world are desired here.
4598 world_set = self.roots[self.target_root].sets["world"]
4599 for arg, atom in self._missing_args:
4600 if arg.name == "world" and atom in world_set:
4601 world_problems = True
4605 sys.stderr.write("\n!!! Problems have been " + \
4606 "detected with your world file\n")
4607 sys.stderr.write("!!! Please run " + \
4608 green("emaint --check world")+"\n\n")
4610 if self._missing_args:
4611 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4612 " Ebuilds for the following packages are either all\n")
4613 sys.stderr.write(colorize("BAD", "!!!") + \
4614 " masked or don't exist:\n")
4615 sys.stderr.write(" ".join(str(atom) for arg, atom in \
4616 self._missing_args) + "\n")
# Arguments that are satisfied only via package.provided: group the
# (arg, atom) pairs and report which parent sets pulled them in.
4618 if self._pprovided_args:
4620 for arg, atom in self._pprovided_args:
4621 if isinstance(arg, SetArg):
4623 arg_atom = (atom, atom)
4626 arg_atom = (arg.arg, atom)
4627 refs = arg_refs.setdefault(arg_atom, [])
4628 if parent not in refs:
4631 msg.append(bad("\nWARNING: "))
4632 if len(self._pprovided_args) > 1:
4633 msg.append("Requested packages will not be " + \
4634 "merged because they are listed in\n")
4636 msg.append("A requested package will not be " + \
4637 "merged because it is listed in\n")
4638 msg.append("package.provided:\n\n")
4639 problems_sets = set()
4640 for (arg, atom), refs in arg_refs.iteritems():
4643 problems_sets.update(refs)
4645 ref_string = ", ".join(["'%s'" % name for name in refs])
4646 ref_string = " pulled in by " + ref_string
4647 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
4649 if "world" in problems_sets:
4650 msg.append("This problem can be solved in one of the following ways:\n\n")
4651 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
4652 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
4653 msg.append(" C) Remove offending entries from package.provided.\n\n")
4654 msg.append("The best course of action depends on the reason that an offending\n")
4655 msg.append("package.provided entry exists.\n\n")
4656 sys.stderr.write("".join(msg))
# Installed packages that are currently masked: collect masking
# reasons per package and print a warning block.
4658 masked_packages = []
4659 for pkg in self._masked_installed:
4660 root_config = pkg.root_config
4661 pkgsettings = self.pkgsettings[pkg.root]
4662 mreasons = get_masking_status(pkg, pkgsettings, root_config)
4663 masked_packages.append((root_config, pkgsettings,
4664 pkg.cpv, pkg.metadata, mreasons))
4666 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
4667 " The following installed packages are masked:\n")
4668 show_masked_packages(masked_packages)
# Build the list of ChangeLog entries between the installed version
# (current) and the version about to be merged (next).
# NOTE(review): several original lines are elided here (early returns,
# the try: around open(), generic except handling) -- visible as gaps
# in the retained numbering.
4672 def calc_changelog(self,ebuildpath,current,next):
4673 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize both versions to "version[-revision]" form; a trailing
# "-r0" is stripped since ChangeLog tags omit it.
4675 current = '-'.join(portage.catpkgsplit(current)[1:])
4676 if current.endswith('-r0'):
4677 current = current[:-3]
4678 next = '-'.join(portage.catpkgsplit(next)[1:])
4679 if next.endswith('-r0'):
4681 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
4683 changelog = open(changelogpath).read()
4684 except SystemExit, e:
4685 raise # Needed else can't exit
4688 divisions = self.find_changelog_tags(changelog)
4689 #print 'XX from',current,'to',next
4690 #for div,text in divisions: print 'XX',div
4691 # skip entries for all revisions above the one we are about to emerge
4692 for i in range(len(divisions)):
4693 if divisions[i][0]==next:
4694 divisions = divisions[i:]
4696 # find out how many entries we are going to display
4697 for i in range(len(divisions)):
4698 if divisions[i][0]==current:
4699 divisions = divisions[:i]
4702 # couldn't find the current revision in the list. display nothing
# Split a ChangeLog text into (release, text) pairs, one per "*"
# version header line. NOTE(review): the loop header, the initial
# divs/release setup and the final return are elided from this chunk.
4706 def find_changelog_tags(self,changelog):
# Match a "* <version> ..." header line (re.M: per-line anchors).
4710 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
4712 if release is not None:
4713 divs.append((release,changelog))
4715 if release is not None:
4716 divs.append((release,changelog[:match.start()]))
4717 changelog = changelog[match.end():]
4718 release = match.group(1)
# Normalize the tag: drop an ".ebuild" suffix and a "-r0" revision.
4719 if release.endswith('.ebuild'):
4720 release = release[:-7]
4721 if release.endswith('-r0'):
4722 release = release[:-3]
# NOTE(review): interior lines are elided from this chunk (e.g. the
# world_set.lock() call after the hasattr check, loop continue lines,
# the try: before create_world_atom, and the final unlock) -- visible
# as gaps in the retained numbering.
4724 def saveNomergeFavorites(self):
4725 """Find atoms in favorites that are not in the mergelist and add them
4726 to the world file if necessary."""
# Bail out for option modes that must not modify the world file.
4727 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
4728 "--oneshot", "--onlydeps", "--pretend"):
4729 if x in self.myopts:
4731 root_config = self.roots[self.target_root]
4732 world_set = root_config.sets["world"]
4734 world_locked = False
4735 if hasattr(world_set, "lock"):
4739 if hasattr(world_set, "load"):
4740 world_set.load() # maybe it's changed on disk
4742 args_set = self._sets["args"]
4743 portdb = self.trees[self.target_root]["porttree"].dbapi
4744 added_favorites = set()
# Collect a world atom for every "nomerge" node that deserves one.
4745 for x in self._set_nodes:
4746 pkg_type, root, pkg_key, pkg_status = x
4747 if pkg_status != "nomerge":
4751 myfavkey = create_world_atom(x, args_set, root_config)
4753 if myfavkey in added_favorites:
4755 added_favorites.add(myfavkey)
4756 except portage.exception.InvalidDependString, e:
4757 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
4758 (pkg_key, str(e)), noiselevel=-1)
4759 writemsg("!!! see '%s'\n\n" % os.path.join(
4760 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Nested sets that are world candidates are recorded by name
# (prefixed with SETPREFIX) rather than expanded to atoms.
4763 for k in self._sets:
4764 if k in ("args", "world") or not root_config.sets[k].world_candidate:
4769 all_added.append(SETPREFIX + k)
4770 all_added.extend(added_favorites)
4773 print ">>> Recording %s in \"world\" favorites file..." % \
4774 colorize("INFORM", str(a))
4776 world_set.update(all_added)
# NOTE(review): many interior lines are elided from this chunk (the
# docstring delimiters, several raise/continue/return lines, the loop
# over mergelist, try/except bodies) -- visible as numbering gaps.
4781 def loadResumeCommand(self, resume_data, skip_masked=True,
4784 Add a resume command to the graph and validate it in the process. This
4785 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before using it.
4788 if not isinstance(resume_data, dict):
4791 mergelist = resume_data.get("mergelist")
4792 if not isinstance(mergelist, list):
4795 fakedb = self.mydbapi
4797 serialized_tasks = []
# Each mergelist entry is [pkg_type, root, cpv, action].
4800 if not (isinstance(x, list) and len(x) == 4):
4802 pkg_type, myroot, pkg_key, action = x
4803 if pkg_type not in self.pkg_tree_map:
4805 if action != "merge":
4807 tree_type = self.pkg_tree_map[pkg_type]
4808 mydb = trees[myroot][tree_type].dbapi
4809 db_keys = list(self._trees_orig[myroot][
4810 tree_type].dbapi._aux_cache_keys)
4812 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
4814 # It does not exist or it is corrupt.
4815 if action == "uninstall":
4818 # TODO: log these somewhere
4820 raise portage.exception.PackageNotFound(pkg_key)
4821 installed = action == "uninstall"
4822 built = pkg_type != "ebuild"
4823 root_config = self.roots[myroot]
4824 pkg = Package(built=built, cpv=pkg_key,
4825 installed=installed, metadata=metadata,
4826 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather
# than trusting the values recorded at resume time.
4828 if pkg_type == "ebuild":
4829 pkgsettings = self.pkgsettings[myroot]
4830 pkgsettings.setcpv(pkg)
4831 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
4832 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
4833 self._pkg_cache[pkg] = pkg
4835 root_config = self.roots[pkg.root]
4836 if "merge" == pkg.operation and \
4837 not visible(root_config.settings, pkg):
4839 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
4841 self._unsatisfied_deps_for_display.append(
4842 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
4844 fakedb[myroot].cpv_inject(pkg)
4845 serialized_tasks.append(pkg)
4846 self.spinner.update()
4848 if self._unsatisfied_deps_for_display:
4851 if not serialized_tasks or "--nodeps" in self.myopts:
4852 self._serialized_tasks_cache = serialized_tasks
4853 self._scheduler_graph = self.digraph
4855 self._select_package = self._select_pkg_from_graph
4856 self.myparams.add("selective")
4857 # Always traverse deep dependencies in order to account for
4858 # potentially unsatisfied dependencies of installed packages.
4859 # This is necessary for correct --keep-going or --resume operation
4860 # in case a package from a group of circularly dependent packages
4861 # fails. In this case, a package which has recently been installed
4862 # may have an unsatisfied circular dependency (pulled in by
4863 # PDEPEND, for example). So, even though a package is already
4864 # installed, it may not have all of its dependencies satisfied, so
4865 # it may not be usable. If such a package is in the subgraph of
4866 # deep dependencies of a scheduled build, that build needs to
4867 # be cancelled. In order for this type of situation to be
4868 # recognized, deep traversal of dependencies is required.
4869 self.myparams.add("deep")
4871 favorites = resume_data.get("favorites")
4872 args_set = self._sets["args"]
4873 if isinstance(favorites, list):
4874 args = self._load_favorites(favorites)
4878 for task in serialized_tasks:
4879 if isinstance(task, Package) and \
4880 task.operation == "merge":
4881 if not self._add_pkg(task, None):
4884 # Packages for argument atoms need to be explicitly
4885 # added via _add_pkg() so that they are included in the
4886 # digraph (needed at least for --tree display).
4888 for atom in arg.set:
4889 pkg, existing_node = self._select_package(
4890 arg.root_config.root, atom)
4891 if existing_node is None and \
4893 if not self._add_pkg(pkg, Dependency(atom=atom,
4894 root=pkg.root, parent=arg)):
4897 # Allow unsatisfied deps here to avoid showing a masking
4898 # message for an unsatisfied dep that isn't necessarily
4900 if not self._create_graph(allow_unsatisfied=True):
# Only keep unsatisfied deps whose parent is scheduled to merge.
4903 unsatisfied_deps = []
4904 for dep in self._unsatisfied_deps:
4905 if not isinstance(dep.parent, Package):
4907 if dep.parent.operation == "merge":
4908 unsatisfied_deps.append(dep)
4911 # For unsatisfied deps of installed packages, only account for
4912 # them if they are in the subgraph of dependencies of a package
4913 # which is scheduled to be installed.
4914 unsatisfied_install = False
4916 dep_stack = self.digraph.parent_nodes(dep.parent)
4918 node = dep_stack.pop()
4919 if not isinstance(node, Package):
4921 if node.operation == "merge":
4922 unsatisfied_install = True
4924 if node in traversed:
4927 dep_stack.extend(self.digraph.parent_nodes(node))
4929 if unsatisfied_install:
4930 unsatisfied_deps.append(dep)
4932 if masked_tasks or unsatisfied_deps:
4933 # This probably means that a required package
4934 # was dropped via --skipfirst. It makes the
4935 # resume list invalid, so convert it to a
4936 # UnsatisfiedResumeDep exception.
4937 raise self.UnsatisfiedResumeDep(self,
4938 masked_tasks + unsatisfied_deps)
4939 self._serialized_tasks_cache = None
4942 except self._unknown_internal_error:
# NOTE(review): the docstring delimiters, the args/loop initialization
# and several continue lines are elided from this chunk.
4947 def _load_favorites(self, favorites):
4949 Use a list of favorites to resume state from a
4950 previous select_files() call. This creates similar
4951 DependencyArg instances to those that would have
4952 been created by the original select_files() call.
4953 This allows Package instances to be matched with
4954 DependencyArg instances during graph creation.
4956 root_config = self.roots[self.target_root]
4957 getSetAtoms = root_config.setconfig.getSetAtoms
4958 sets = root_config.sets
4961 if not isinstance(x, basestring):
4963 if x in ("system", "world"):
# Favorites that name a set carry the SETPREFIX marker.
4965 if x.startswith(SETPREFIX):
4966 s = x[len(SETPREFIX):]
4971 # Recursively expand sets so that containment tests in
4972 # self._get_parent_sets() properly match atoms in nested
4973 # sets (like if world contains system).
4974 expanded_set = InternalPackageSet(
4975 initial_atoms=getSetAtoms(s))
4976 self._sets[s] = expanded_set
4977 args.append(SetArg(arg=x, set=expanded_set,
4978 root_config=root_config))
# Everything else must be a plain valid atom.
4980 if not portage.isvalidatom(x):
4982 args.append(AtomArg(arg=x, atom=x,
4983 root_config=root_config))
4985 self._set_args(args)
# Raised by loadResumeCommand() when the resume list has masked tasks
# or unsatisfied deps. NOTE(review): the docstring delimiter lines are
# elided from this chunk.
4988 class UnsatisfiedResumeDep(portage.exception.PortageException):
4990 A dependency of a resume list is not installed. This
4991 can occur when a required package is dropped from the
4992 merge list via --skipfirst.
4994 def __init__(self, depgraph, value):
4995 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the depgraph so the handler can inspect it.
4996 self.depgraph = depgraph
# Base class for depgraph-internal control-flow exceptions (see
# _unknown_internal_error and _serialize_tasks_retry below).
4998 class _internal_exception(portage.exception.PortageException):
4999 def __init__(self, value=""):
5000 portage.exception.PortageException.__init__(self, value)
# NOTE(review): the docstring is truncated here -- its delimiters and
# the remainder of the sentence are elided from this chunk.
5002 class _unknown_internal_error(_internal_exception):
5004 Used by the depgraph internally to terminate graph creation.
5005 The specific reason for the failure should have been dumped
5006 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): the docstring delimiter lines are elided from this
# chunk; the class body appears to consist of the docstring only.
5010 class _serialize_tasks_retry(_internal_exception):
5012 This is raised by the _serialize_tasks() method when it needs to
5013 be called again for some reason. The only case that it's currently
5014 used for is when neglected dependencies need to be added to the
5015 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): many interior lines are elided from this chunk (e.g.
# the early-return on a cache hit in match(), the orig_atom binding,
# loop headers over slots, and several return statements) -- visible
# as gaps in the retained numbering.
5018 class _dep_check_composite_db(portage.dbapi):
5020 A dbapi-like interface that is optimized for use in dep_check() calls.
5021 This is built on top of the existing depgraph package selection logic.
5022 Some packages that have been added to the graph may be masked from this
5023 view in order to influence the atom preference selection that occurs
5026 def __init__(self, depgraph, root):
5027 portage.dbapi.__init__(self)
5028 self._depgraph = depgraph
# Per-instance caches; invalidated via _clear_cache().
5030 self._match_cache = {}
5031 self._cpv_pkg_map = {}
# Drop both memoization caches (match results and cpv->Package map).
5033 def _clear_cache(self):
5034 self._match_cache.clear()
5035 self._cpv_pkg_map.clear()
# dbapi.match() emulation backed by the depgraph's package selection.
5037 def match(self, atom):
5038 ret = self._match_cache.get(atom)
5043 atom = self._dep_expand(atom)
5044 pkg, existing = self._depgraph._select_package(self._root, atom)
5048 # Return the highest available from select_package() as well as
5049 # any matching slots in the graph db.
5051 slots.add(pkg.metadata["SLOT"])
5052 atom_cp = portage.dep_getkey(atom)
5053 if pkg.cp.startswith("virtual/"):
5054 # For new-style virtual lookahead that occurs inside
5055 # dep_check(), examine all slots. This is needed
5056 # so that newer slots will not unnecessarily be pulled in
5057 # when a satisfying lower slot is already installed. For
5058 # example, if virtual/jdk-1.4 is satisfied via kaffe then
5059 # there's no need to pull in a newer slot to satisfy a
5060 # virtual/jdk dependency.
5061 for db, pkg_type, built, installed, db_keys in \
5062 self._depgraph._filtered_trees[self._root]["dbs"]:
5063 for cpv in db.match(atom):
5064 if portage.cpv_getkey(cpv) != pkg.cp:
5066 slots.add(db.aux_get(cpv, ["SLOT"])[0])
5068 if self._visible(pkg):
5069 self._cpv_pkg_map[pkg.cpv] = pkg
5071 slots.remove(pkg.metadata["SLOT"])
# Resolve the best visible package for each remaining slot.
5073 slot_atom = "%s:%s" % (atom_cp, slots.pop())
5074 pkg, existing = self._depgraph._select_package(
5075 self._root, slot_atom)
5078 if not self._visible(pkg):
5080 self._cpv_pkg_map[pkg.cpv] = pkg
5083 self._cpv_sort_ascending(ret)
5084 self._match_cache[orig_atom] = ret
# Decide whether a package may be offered to dep_check(); packages
# that would cause slot conflicts with graph choices are masked.
5087 def _visible(self, pkg):
5088 if pkg.installed and "selective" not in self._depgraph.myparams:
5090 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
5091 except (StopIteration, portage.exception.InvalidDependString):
5098 self._depgraph.pkgsettings[pkg.root], pkg):
5100 except portage.exception.InvalidDependString:
5102 in_graph = self._depgraph._slot_pkg_map[
5103 self._root].get(pkg.slot_atom)
5104 if in_graph is None:
5105 # Mask choices for packages which are not the highest visible
5106 # version within their slot (since they usually trigger slot
5108 highest_visible, in_graph = self._depgraph._select_package(
5109 self._root, pkg.slot_atom)
5110 if pkg != highest_visible:
5112 elif in_graph != pkg:
5113 # Mask choices for packages that would trigger a slot
5114 # conflict with a previously selected package.
5118 def _dep_expand(self, atom):
5120 This is only needed for old installed packages that may
5121 contain atoms that are not fully qualified with a specific
5122 category. Emulate the cpv_expand() function that's used by
5123 dbapi.match() in cases like this. If there are multiple
5124 matches, it's often due to a new-style virtual that has
5125 been added, so try to filter those out to avoid raising
5128 root_config = self._depgraph.roots[self._root]
5130 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
5131 if len(expanded_atoms) > 1:
# Prefer the single non-virtual expansion when one exists.
5132 non_virtual_atoms = []
5133 for x in expanded_atoms:
5134 if not portage.dep_getkey(x).startswith("virtual/"):
5135 non_virtual_atoms.append(x)
5136 if len(non_virtual_atoms) == 1:
5137 expanded_atoms = non_virtual_atoms
5138 if len(expanded_atoms) > 1:
5139 # compatible with portage.cpv_expand()
5140 raise portage.exception.AmbiguousPackageName(
5141 [portage.dep_getkey(x) for x in expanded_atoms])
5143 atom = expanded_atoms[0]
# No expansion found: fall back to a "null" or "virtual" category.
5145 null_atom = insert_category_into_atom(atom, "null")
5146 null_cp = portage.dep_getkey(null_atom)
5147 cat, atom_pn = portage.catsplit(null_cp)
5148 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5150 # Allow the resolver to choose which virtual.
5151 atom = insert_category_into_atom(atom, "virtual")
5153 atom = insert_category_into_atom(atom, "null")
# dbapi.aux_get() emulation: values come from the cached Package
# objects recorded by match(); missing keys yield "".
5156 def aux_get(self, cpv, wants):
5157 metadata = self._cpv_pkg_map[cpv].metadata
5158 return [metadata.get(x, "") for x in wants]
# Scheduler class header and its option-set class attributes. The rest
# of the class body continues below and past the end of this chunk.
5160 class Scheduler(PollScheduler):
# Options under which blockers are not enforced.
5162 _opts_ignore_blockers = \
5163 frozenset(["--buildpkgonly",
5164 "--fetchonly", "--fetch-all-uri",
5165 "--nodeps", "--pretend"])
# Options that keep the scheduler in the foreground.
5167 _opts_no_background = \
5168 frozenset(["--pretend",
5169 "--fetchonly", "--fetch-all-uri"])
# Options under which emerge will not stop/restart itself.
5171 _opts_no_restart = frozenset(["--buildpkgonly",
5172 "--fetchonly", "--fetch-all-uri", "--pretend"])
# Options that are incompatible with resuming.
5174 _bad_resume_opts = set(["--ask", "--changelog",
5175 "--resume", "--skipfirst"])
# _emerge_log_dir is defined elsewhere in this file -- TODO confirm.
5177 _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
# Callback interface handed to merge tasks. NOTE(review): the tail of
# the __slots__ tuple (including its closing parenthesis) is elided
# from this chunk.
5179 class _iface_class(SlotObject):
5180 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
5181 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
5182 "scheduleSetup", "scheduleUnpack", "scheduleYield",
# Minimal interface passed to fetch tasks: a log file path plus a
# schedule callback.
5185 class _fetch_iface_class(SlotObject):
5186 __slots__ = ("log_file", "schedule")
# Container type holding one task queue per named category.
5188 _task_queues_class = slot_dict_class(
5189 ("merge", "jobs", "fetch", "unpack"), prefix="")
# Boolean build options derived from command-line flags (populated in
# __init__ by checking "--<name>" membership in myopts).
5191 class _build_opts_class(SlotObject):
5192 __slots__ = ("buildpkg", "buildpkgonly",
5193 "fetch_all_uri", "fetchonly", "pretend")
# Boolean binary-package options derived from command-line flags.
5195 class _binpkg_opts_class(SlotObject):
5196 __slots__ = ("fetchonly", "getbinpkg", "pretend")
# Progress counter pair: current value and maximum.
5198 class _pkg_count_class(SlotObject):
5199 __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
	__slots__ = ("xterm_titles",)

	def log(self, *pargs, **kwargs):
		"""Forward a log entry to emergelog().

		When xterm titles are disabled, any short_msg keyword is
		dropped first so it cannot interfere with the scheduler's
		status display.
		"""
		xterm_titles = self.xterm_titles
		if not xterm_titles:
			kwargs.pop("short_msg", None)
		emergelog(xterm_titles, *pargs, **kwargs)
# Record describing a failed package build/merge.
5210 class _failed_pkg(SlotObject):
5211 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
# NOTE(review): two lines are elided from this chunk -- presumably the
# "self._root = root" assignment in __init__ and the "def allocate"
# header before the return at line 5224; confirm against upstream.
5213 class _ConfigPool(object):
5214 """Interface for a task to temporarily allocate a config
5215 instance from a pool. This allows a task to be constructed
5216 long before the config instance actually becomes needed, like
5217 when prefetchers are constructed for the whole merge list."""
5218 __slots__ = ("_root", "_allocate", "_deallocate")
5219 def __init__(self, root, allocate, deallocate):
5221 self._allocate = allocate
5222 self._deallocate = deallocate
5224 return self._allocate(self._root)
5225 def deallocate(self, settings):
5226 self._deallocate(settings)
# Scheduler's analogue of the depgraph exception of the same name.
# NOTE(review): the docstring delimiter lines are elided from this chunk.
5228 class _unknown_internal_error(portage.exception.PortageException):
5230 Used internally to terminate scheduling. The specific reason for
5231 the failure should have been dumped to stderr.
5233 def __init__(self, value=""):
5234 portage.exception.PortageException.__init__(self, value)
# Scheduler constructor: wires up option structs, task queues, the
# scheduler interface handed to dblink, prefetch state, and job-throttle
# parameters, then records which portage instance is currently running.
# NOTE(review): several original lines are elided from this listing (e.g.
# 5241, 5254-5255, 5289, 5295, 5297, 5301 — the `for root in trees:`
# header before 5302, the `if max_jobs is None:` body, the try/except
# around the fetch-log truncation) — consult the full source.
5236 def __init__(self, settings, trees, mtimedb, myopts,
5237 spinner, mergelist, favorites, digraph):
5238 PollScheduler.__init__(self)
5239 self.settings = settings
5240 self.target_root = settings["ROOT"]
5242 self.myopts = myopts
5243 self._spinner = spinner
5244 self._mtimedb = mtimedb
5245 self._mergelist = mergelist
5246 self._favorites = favorites
5247 self._args_set = InternalPackageSet(favorites)
# Translate slot names like "fetch_all_uri" into "--fetch-all-uri" and
# record whether each flag is present in myopts.
5248 self._build_opts = self._build_opts_class()
5249 for k in self._build_opts.__slots__:
5250 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
5251 self._binpkg_opts = self._binpkg_opts_class()
5252 for k in self._binpkg_opts.__slots__:
5253 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
5256 self._logger = self._emerge_log_class()
5257 self._task_queues = self._task_queues_class()
5258 for k in self._task_queues.allowed_keys:
5259 setattr(self._task_queues, k,
5260 SequentialTaskQueue())
5262 # Holds merges that will wait to be executed when no builds are
5263 # executing. This is useful for system packages since dependencies
5264 # on system packages are frequently unspecified.
5265 self._merge_wait_queue = []
5266 # Holds merges that have been transfered from the merge_wait_queue to
5267 # the actual merge queue. They are removed from this list upon
5268 # completion. Other packages can start building only when this list is
5270 self._merge_wait_scheduled = []
5272 # Holds system packages and their deep runtime dependencies. Before
5273 # being merged, these packages go to merge_wait_queue, to be merged
5274 # when no other packages are building.
5275 self._deep_system_deps = set()
5277 # Holds packages to merge which will satisfy currently unsatisfied
5278 # deep runtime dependencies of system packages. If this is not empty
5279 # then no parallel builds will be spawned until it is empty. This
5280 # minimizes the possibility that a build will fail due to the system
5281 # being in a fragile state. For example, see bug #259954.
5282 self._unsatisfied_system_deps = set()
5284 self._status_display = JobStatusDisplay(
5285 xterm_titles=('notitles' not in settings.features))
5286 self._max_load = myopts.get("--load-average")
5287 max_jobs = myopts.get("--jobs")
5288 if max_jobs is None:
5290 self._set_max_jobs(max_jobs)
5292 # The root where the currently running
5293 # portage instance is installed.
5294 self._running_root = trees["/"]["root_config"]
5296 if settings.get("PORTAGE_DEBUG", "") == "1":
5298 self.pkgsettings = {}
5299 self._config_pool = {}
5300 self._blocker_db = {}
5302 self._config_pool[root] = []
5303 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Build the callback bundle that dblink and ebuild tasks use to talk
# back to this scheduler.
5305 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
5306 schedule=self._schedule_fetch)
5307 self._sched_iface = self._iface_class(
5308 dblinkEbuildPhase=self._dblink_ebuild_phase,
5309 dblinkDisplayMerge=self._dblink_display_merge,
5310 dblinkElog=self._dblink_elog,
5311 dblinkEmergeLog=self._dblink_emerge_log,
5312 fetch=fetch_iface, register=self._register,
5313 schedule=self._schedule_wait,
5314 scheduleSetup=self._schedule_setup,
5315 scheduleUnpack=self._schedule_unpack,
5316 scheduleYield=self._schedule_yield,
5317 unregister=self._unregister)
# Weak values: a prefetcher entry disappears once nothing else holds it.
5319 self._prefetchers = weakref.WeakValueDictionary()
5320 self._pkg_queue = []
5321 self._completed_tasks = set()
5323 self._failed_pkgs = []
5324 self._failed_pkgs_all = []
5325 self._failed_pkgs_die_msgs = []
5326 self._post_mod_echo_msgs = []
5327 self._parallel_fetch = False
5328 merge_count = len([x for x in mergelist \
5329 if isinstance(x, Package) and x.operation == "merge"])
5330 self._pkg_count = self._pkg_count_class(
5331 curval=0, maxval=merge_count)
5332 self._status_display.maxval = self._pkg_count.maxval
5334 # The load average takes some time to respond when new
5335 # jobs are added, so we need to limit the rate of adding
5337 self._job_delay_max = 10
5338 self._job_delay_factor = 1.0
5339 self._job_delay_exp = 1.5
5340 self._previous_job_start_time = None
5342 self._set_digraph(digraph)
5344 # This is used to memoize the _choose_pkg() result when
5345 # no packages can be chosen until one of the existing
5347 self._choose_pkg_return_early = False
# parallel-fetch requires distlocks and is pointless for a single-entry
# merge list or fetch-only style invocations.
5349 features = self.settings.features
5350 if "parallel-fetch" in features and \
5351 not ("--pretend" in self.myopts or \
5352 "--fetch-all-uri" in self.myopts or \
5353 "--fetchonly" in self.myopts):
5354 if "distlocks" not in features:
5355 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
5356 portage.writemsg(red("!!!")+" parallel-fetching " + \
5357 "requires the distlocks feature enabled"+"\n",
5359 portage.writemsg(red("!!!")+" you have it disabled, " + \
5360 "thus parallel-fetching is being disabled"+"\n",
5362 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
5363 elif len(mergelist) > 1:
5364 self._parallel_fetch = True
5366 if self._parallel_fetch:
5367 # clear out existing fetch log if it exists
5369 open(self._fetch_log, 'w')
5370 except EnvironmentError:
# Record the installed portage package (if any) so a self-upgrade can be
# detected later and trigger a restart.
5373 self._running_portage = None
5374 portage_match = self._running_root.trees["vartree"].dbapi.match(
5375 portage.const.PORTAGE_PACKAGE_ATOM)
5377 cpv = portage_match.pop()
5378 self._running_portage = self._pkg(cpv, "installed",
5379 self._running_root, installed=True)
# Delegate polling to the PollScheduler base class.
# NOTE(review): lines 5382/5384 are elided from this listing — there may
# be additional statements around the delegation; confirm in full source.
5381 def _poll(self, timeout=None):
5383 PollScheduler._poll(self, timeout=timeout)
# Record the job limit and mirror it onto the jobs task queue.
5385 def _set_max_jobs(self, max_jobs):
5386 self._max_jobs = max_jobs
5387 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether to run in background mode; interactive packages force
# --jobs=1 and keep output on stdio.
5389 def _background_mode(self):
5391 Check if background mode is enabled and adjust states as necessary.
5394 @returns: True if background mode is enabled, False otherwise.
# Background when parallel (--jobs unlimited/True or >1) or --quiet,
# unless an option from _opts_no_background is present.
5396 background = (self._max_jobs is True or \
5397 self._max_jobs > 1 or "--quiet" in self.myopts) and \
5398 not bool(self._opts_no_background.intersection(self.myopts))
5401 interactive_tasks = self._get_interactive_tasks()
5402 if interactive_tasks:
5404 writemsg_level(">>> Sending package output to stdio due " + \
5405 "to interactive package(s):\n",
5406 level=logging.INFO, noiselevel=-1)
5408 for pkg in interactive_tasks:
5409 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
5411 pkg_str += " for " + pkg.root
5414 writemsg_level("".join("%s\n" % (l,) for l in msg),
5415 level=logging.INFO, noiselevel=-1)
5416 if self._max_jobs is True or self._max_jobs > 1:
5417 self._set_max_jobs(1)
5418 writemsg_level(">>> Setting --jobs=1 due " + \
5419 "to the above interactive package(s)\n",
5420 level=logging.INFO, noiselevel=-1)
5422 self._status_display.quiet = \
5424 ("--quiet" in self.myopts and \
5425 "--verbose" not in self.myopts)
5427 self._logger.xterm_titles = \
5428 "notitles" not in self.settings.features and \
5429 self._status_display.quiet
# Return merge tasks whose PROPERTIES contains "interactive". An invalid
# PROPERTIES dep string aborts scheduling via _unknown_internal_error.
5433 def _get_interactive_tasks(self):
5434 from portage import flatten
5435 from portage.dep import use_reduce, paren_reduce
5436 interactive_tasks = []
5437 for task in self._mergelist:
5438 if not (isinstance(task, Package) and \
5439 task.operation == "merge"):
# NOTE(review): the `continue` and `try:` lines (5440-5441) are elided
# from this listing.
5442 properties = flatten(use_reduce(paren_reduce(
5443 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
5444 except portage.exception.InvalidDependString, e:
5445 show_invalid_depstring_notice(task,
5446 task.metadata["PROPERTIES"], str(e))
5447 raise self._unknown_internal_error()
5448 if "interactive" in properties:
5449 interactive_tasks.append(task)
5450 return interactive_tasks
# Keep the dependency graph only when it is useful (parallel build without
# --nodeps); otherwise drop it and skip the graph-based preprocessing.
5452 def _set_digraph(self, digraph):
5453 if "--nodeps" in self.myopts or \
5454 (self._max_jobs is not True and self._max_jobs < 2):
5456 self._digraph = None
5459 self._digraph = digraph
5460 self._find_system_deps()
5461 self._prune_digraph()
5462 self._prevent_builddir_collisions()
5464 def _find_system_deps(self):
5466 Find system packages and their deep runtime dependencies. Before being
5467 merged, these packages go to merge_wait_queue, to be merged when no
5468 other packages are building.
# Rebuild the set from the graph, then keep only packages that are
# actually scheduled to be merged.
5470 deep_system_deps = self._deep_system_deps
5471 deep_system_deps.clear()
5472 deep_system_deps.update(
5473 _find_deep_system_runtime_deps(self._digraph))
5474 deep_system_deps.difference_update([pkg for pkg in \
5475 deep_system_deps if pkg.operation != "merge"])
# Repeatedly strip irrelevant root nodes (non-packages, installed
# nomerge nodes, completed tasks) from the dependency graph.
# NOTE(review): the loop header around line 5485 and the terminating
# `break` (~5495) are elided from this listing — the visible
# `if not removed_nodes:` / `removed_nodes.clear()` implies an enclosing
# repeat-until-fixed-point loop; confirm in full source.
5477 def _prune_digraph(self):
5479 Prune any root nodes that are irrelevant.
5482 graph = self._digraph
5483 completed_tasks = self._completed_tasks
5484 removed_nodes = set()
5486 for node in graph.root_nodes():
5487 if not isinstance(node, Package) or \
5488 (node.installed and node.operation == "nomerge") or \
5490 node in completed_tasks:
5491 removed_nodes.add(node)
5493 graph.difference_update(removed_nodes)
5494 if not removed_nodes:
5496 removed_nodes.clear()
5498 def _prevent_builddir_collisions(self):
5500 When building stages, sometimes the same exact cpv needs to be merged
5501 to both $ROOTs. Add edges to the digraph in order to avoid collisions
5502 in the builddir. Currently, normal file locks would be inappropriate
5503 for this purpose since emerge holds all of it's build dir locks from
# NOTE(review): the `cpv_map = {}` initialization (~5506) and a
# `continue` (~5510) plus the merge-operation filter (~5511-5512) are
# elided from this listing.
5507 for pkg in self._mergelist:
5508 if not isinstance(pkg, Package):
5509 # a satisfied blocker
5513 if pkg.cpv not in cpv_map:
5514 cpv_map[pkg.cpv] = [pkg]
# Same cpv already scheduled: serialize by adding a buildtime edge from
# every earlier occurrence to this one.
5516 for earlier_pkg in cpv_map[pkg.cpv]:
5517 self._digraph.add(earlier_pkg, pkg,
5518 priority=DepPriority(buildtime=True))
5519 cpv_map[pkg.cpv].append(pkg)
# Exception carrying the unmerge exit status in self.status (first parg).
5521 class _pkg_failure(portage.exception.PortageException):
5523 An instance of this class is raised by unmerge() when
5524 an uninstallation fails.
5527 def __init__(self, *pargs):
5528 portage.exception.PortageException.__init__(self, pargs)
5530 self.status = pargs[0]
# Front-of-queue insertion: interactive fetches jump ahead of prefetchers.
5532 def _schedule_fetch(self, fetcher):
5534 Schedule a fetcher on the fetch queue, in order to
5535 serialize access to the fetch log.
5537 self._task_queues.fetch.addFront(fetcher)
5539 def _schedule_setup(self, setup_phase):
5541 Schedule a setup phase on the merge queue, in order to
5542 serialize unsandboxed access to the live filesystem.
5544 self._task_queues.merge.addFront(setup_phase)
5547 def _schedule_unpack(self, unpack_phase):
5549 Schedule an unpack phase on the unpack queue, in order
5550 to serialize $DISTDIR access for live ebuilds.
5552 self._task_queues.unpack.add(unpack_phase)
# NOTE(review): lines 5558-5559 are elided — the docstring says this
# returns a callable, so the visible `return` is presumably the body of
# an elided nested function; confirm in full source.
5554 def _find_blockers(self, new_pkg):
5556 Returns a callable which should be called only when
5557 the vdb lock has been acquired.
5560 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# Collect vartree dblinks for installed packages that block new_pkg,
# skipping the package it replaces (same slot atom or same cpv).
5563 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
5564 if self._opts_ignore_blockers.intersection(self.myopts):
5567 # Call gc.collect() here to avoid heap overflow that
5568 # triggers 'Cannot allocate memory' errors (reported
5573 blocker_db = self._blocker_db[new_pkg.root]
5575 blocker_dblinks = []
5576 for blocking_pkg in blocker_db.findInstalledBlockers(
5577 new_pkg, acquire_lock=acquire_lock):
5578 if new_pkg.slot_atom == blocking_pkg.slot_atom:
5580 if new_pkg.cpv == blocking_pkg.cpv:
5582 blocker_dblinks.append(portage.dblink(
5583 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
5584 self.pkgsettings[blocking_pkg.root], treetype="vartree",
5585 vartree=self.trees[blocking_pkg.root]["vartree"]))
5589 return blocker_dblinks
# Translate a dblink instance back into this scheduler's Package object.
5591 def _dblink_pkg(self, pkg_dblink):
5592 cpv = pkg_dblink.mycpv
5593 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
5594 root_config = self.trees[pkg_dblink.myroot]["root_config"]
5595 installed = type_name == "installed"
5596 return self._pkg(cpv, type_name, root_config, installed=installed)
# Append msg to the file at log_path.
# NOTE(review): lines 5600-5604 (write/close logic) are elided from this
# listing; confirm the file is closed in the full source.
5598 def _append_to_log_path(self, log_path, msg):
5599 f = open(log_path, 'a')
# Route elog messages from a dblink: in background mode with a log file,
# send output to the log instead of the terminal.
# NOTE(review): lines 5606, 5608-5609, 5611, 5614-5617, 5619, 5621-5622
# (out/log_file setup, message loop, close) are elided from this listing.
5605 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
5607 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
5610 background = self._background
5612 if background and log_path is not None:
5613 log_file = open(log_path, 'a')
5618 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
5620 if log_file is not None:
# Forward a dblink emerge-log message to the scheduler's logger.
5623 def _dblink_emerge_log(self, msg):
5624 self._logger.log(msg)
# Display a merge message: write to the terminal unless backgrounded
# below WARN level, and mirror it into the log file when one exists.
5626 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
5627 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
5628 background = self._background
5630 if log_path is None:
5631 if not (background and level < logging.WARN):
5632 portage.util.writemsg_level(msg,
5633 level=level, noiselevel=noiselevel)
5636 portage.util.writemsg_level(msg,
5637 level=level, noiselevel=noiselevel)
5638 self._append_to_log_path(log_path, msg)
5640 def _dblink_ebuild_phase(self,
5641 pkg_dblink, pkg_dbapi, ebuild_path, phase):
5643 Using this callback for merge phases allows the scheduler
5644 to run while these phases execute asynchronously, and allows
5645 the scheduler control output handling.
5648 scheduler = self._sched_iface
5649 settings = pkg_dblink.settings
5650 pkg = self._dblink_pkg(pkg_dblink)
5651 background = self._background
5652 log_path = settings.get("PORTAGE_LOG_FILE")
# Run the phase as an async EbuildPhase task; the elided lines
# (5658-5659, presumably scheduler.scheduleWait on the phase) block until
# it completes before the returncode is read.
5654 ebuild_phase = EbuildPhase(background=background,
5655 pkg=pkg, phase=phase, scheduler=scheduler,
5656 settings=settings, tree=pkg_dblink.treetype)
5657 ebuild_phase.start()
5660 return ebuild_phase.returncode
# NOTE(review): the early-return lines (5670, 5676-5681, 5686, 5690,
# 5693-5697, 5700, 5703-5705) are elided — including the writemsg_level
# call headers and final `return os.EX_OK`; confirm in full source.
5662 def _generate_digests(self):
5664 Generate digests if necessary for --digests or FEATURES=digest.
5665 In order to avoid interference, this must done before parallel
5669 if '--fetchonly' in self.myopts:
5672 digest = '--digest' in self.myopts
5674 for pkgsettings in self.pkgsettings.itervalues():
5675 if 'digest' in pkgsettings.features:
5682 for x in self._mergelist:
5683 if not isinstance(x, Package) or \
5684 x.type_name != 'ebuild' or \
5685 x.operation != 'merge':
5687 pkgsettings = self.pkgsettings[x.root]
5688 if '--digest' not in self.myopts and \
5689 'digest' not in pkgsettings.features:
5691 portdb = x.root_config.trees['porttree'].dbapi
5692 ebuild_path = portdb.findname(x.cpv)
5695 "!!! Could not locate ebuild for '%s'.\n" \
5696 % x.cpv, level=logging.ERROR, noiselevel=-1)
# 'O' points digestgen at the ebuild's directory (its Manifest location).
5698 pkgsettings['O'] = os.path.dirname(ebuild_path)
5699 if not portage.digestgen([], pkgsettings, myportdb=portdb):
5701 "!!! Unable to generate manifest for '%s'.\n" \
5702 % x.cpv, level=logging.ERROR, noiselevel=-1)
# Verify Manifests for every ebuild in the merge list using quiet clones
# of each root's config. Skipped unless FEATURES=strict and not fetch-only.
# NOTE(review): the `quiet_settings = {}` initialization (~5716), the
# early `return os.EX_OK` lines, and the failure return (5738-5740) are
# elided from this listing.
5707 def _check_manifests(self):
5708 # Verify all the manifests now so that the user is notified of failure
5709 # as soon as possible.
5710 if "strict" not in self.settings.features or \
5711 "--fetchonly" in self.myopts or \
5712 "--fetch-all-uri" in self.myopts:
5715 shown_verifying_msg = False
5717 for myroot, pkgsettings in self.pkgsettings.iteritems():
5718 quiet_config = portage.config(clone=pkgsettings)
5719 quiet_config["PORTAGE_QUIET"] = "1"
5720 quiet_config.backup_changes("PORTAGE_QUIET")
5721 quiet_settings[myroot] = quiet_config
5724 for x in self._mergelist:
5725 if not isinstance(x, Package) or \
5726 x.type_name != "ebuild":
5729 if not shown_verifying_msg:
5730 shown_verifying_msg = True
5731 self._status_msg("Verifying ebuild manifests")
5733 root_config = x.root_config
5734 portdb = root_config.trees["porttree"].dbapi
5735 quiet_config = quiet_settings[root_config.root]
5736 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
5737 if not portage.digestcheck([], quiet_config, strict=True):
# Queue a background prefetcher for every mergelist entry except the
# first (which is fetched in the foreground anyway).
5742 def _add_prefetchers(self):
5744 if not self._parallel_fetch:
5747 if self._parallel_fetch:
5748 self._status_msg("Starting parallel fetch")
5750 prefetchers = self._prefetchers
5751 getbinpkg = "--getbinpkg" in self.myopts
5753 # In order to avoid "waiting for lock" messages
5754 # at the beginning, which annoy users, never
5755 # spawn a prefetcher for the first package.
5756 for pkg in self._mergelist[1:]:
5757 prefetcher = self._create_prefetcher(pkg)
5758 if prefetcher is not None:
5759 self._task_queues.fetch.add(prefetcher)
5760 prefetchers[pkg] = prefetcher
# Build a background fetch task for the package: EbuildFetcher for
# ebuilds, BinpkgPrefetcher for remote binary packages when --getbinpkg.
# NOTE(review): the `prefetcher = None` default and final `return
# prefetcher` lines are elided from this listing.
5762 def _create_prefetcher(self, pkg):
5764 @return: a prefetcher, or None if not applicable
5768 if not isinstance(pkg, Package):
5771 elif pkg.type_name == "ebuild":
# The ConfigPool defers config allocation until the fetcher runs.
5773 prefetcher = EbuildFetcher(background=True,
5774 config_pool=self._ConfigPool(pkg.root,
5775 self._allocate_config, self._deallocate_config),
5776 fetchonly=1, logfile=self._fetch_log,
5777 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
5779 elif pkg.type_name == "binary" and \
5780 "--getbinpkg" in self.myopts and \
5781 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
5783 prefetcher = BinpkgPrefetcher(background=True,
5784 pkg=pkg, scheduler=self._sched_iface)
# True when a portage self-upgrade appears anywhere but last in the merge
# list (last-position upgrades need no restart since nothing follows).
5788 def _is_restart_scheduled(self):
5790 Check if the merge list contains a replacement
5791 for the current running instance, that will result
5792 in restart after merge.
5794 @returns: True if a restart is scheduled, False otherwise.
5796 if self._opts_no_restart.intersection(self.myopts):
5799 mergelist = self._mergelist
5801 for i, pkg in enumerate(mergelist):
5802 if self._is_restart_necessary(pkg) and \
5803 i != len(mergelist) - 1:
# A restart is needed when pkg is the portage package for the running
# root and differs in version from the currently installed portage.
5808 def _is_restart_necessary(self, pkg):
5810 @return: True if merging the given package
5811 requires restart, False otherwise.
5814 # Figure out if we need a restart.
5815 if pkg.root == self._running_root.root and \
5816 portage.match_from_list(
5817 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
5818 if self._running_portage:
5819 return pkg.cpv != self._running_portage.cpv
5823 def _restart_if_necessary(self, pkg):
5825 Use execv() to restart emerge. This happens
5826 if portage upgrades itself and there are
5827 remaining packages in the list.
# Guard clauses: restart disabled, not a portage upgrade, or pkg is the
# last entry (nothing left to merge after it).
5830 if self._opts_no_restart.intersection(self.myopts):
5833 if not self._is_restart_necessary(pkg):
5836 if pkg == self._mergelist[-1]:
5839 self._main_loop_cleanup()
5841 logger = self._logger
5842 pkg_count = self._pkg_count
5843 mtimedb = self._mtimedb
5844 bad_resume_opts = self._bad_resume_opts
5846 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
5847 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
5849 logger.log(" *** RESTARTING " + \
5850 "emerge via exec() after change of " + \
# Drop the just-merged package from the resume list before exec'ing so
# the restarted emerge does not repeat it.
5853 mtimedb["resume"]["mergelist"].remove(list(pkg))
5855 portage.run_exitfuncs()
5856 mynewargv = [sys.argv[0], "--resume"]
5857 resume_opts = self.myopts.copy()
5858 # For automatic resume, we need to prevent
5859 # any of bad_resume_opts from leaking in
5860 # via EMERGE_DEFAULT_OPTS.
5861 resume_opts["--ignore-default-opts"] = True
5862 for myopt, myarg in resume_opts.iteritems():
5863 if myopt not in bad_resume_opts:
5865 mynewargv.append(myopt)
5867 mynewargv.append(myopt +"="+ str(myarg))
5868 # priority only needs to be adjusted on the first run
5869 os.environ["PORTAGE_NICENESS"] = "0"
# os.execv never returns on success: the process image is replaced.
5870 os.execv(mynewargv[0], mynewargv)
# --- Body of the public merge() entry point -------------------------------
# NOTE(review): the `def merge(self):` header (~5872) and many interior
# lines are elided from this listing; this region drives the whole run:
# resume banner, per-root sanity checks, digest/manifest verification, the
# keep-going retry loop around _merge(), and the final failure summary.
5874 if "--resume" in self.myopts:
5876 portage.writemsg_stdout(
5877 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
5878 self._logger.log(" *** Resuming merge...")
5880 self._save_resume_list()
5883 self._background = self._background_mode()
5884 except self._unknown_internal_error:
5887 for root in self.trees:
5888 root_config = self.trees[root]["root_config"]
5890 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
5891 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
5892 # for ensuring sane $PWD (bug #239560) and storing elog messages.
5893 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
5894 if not tmpdir or not os.path.isdir(tmpdir):
5895 msg = "The directory specified in your " + \
5896 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
5897 "does not exist. Please create this " + \
5898 "directory or correct your PORTAGE_TMPDIR setting."
5899 msg = textwrap.wrap(msg, 70)
5900 out = portage.output.EOutput()
# Propagate background mode into each root's settings so spawned phases
# inherit PORTAGE_BACKGROUND=1.
5905 if self._background:
5906 root_config.settings.unlock()
5907 root_config.settings["PORTAGE_BACKGROUND"] = "1"
5908 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
5909 root_config.settings.lock()
5911 self.pkgsettings[root] = portage.config(
5912 clone=root_config.settings)
5914 rval = self._generate_digests()
5915 if rval != os.EX_OK:
5918 rval = self._check_manifests()
5919 if rval != os.EX_OK:
# Retry loop: with --keep-going, failed packages are dropped from the
# resume list and the remainder is recalculated and re-run.
5922 keep_going = "--keep-going" in self.myopts
5923 fetchonly = self._build_opts.fetchonly
5924 mtimedb = self._mtimedb
5925 failed_pkgs = self._failed_pkgs
5928 rval = self._merge()
5929 if rval == os.EX_OK or fetchonly or not keep_going:
5931 if "resume" not in mtimedb:
5933 mergelist = self._mtimedb["resume"].get("mergelist")
5940 for failed_pkg in failed_pkgs:
5941 mergelist.remove(list(failed_pkg.pkg))
5943 self._failed_pkgs_all.extend(failed_pkgs)
5949 if not self._calc_resume_list():
5952 clear_caches(self.trees)
5953 if not self._mergelist:
5956 self._save_resume_list()
5957 self._pkg_count.curval = 0
5958 self._pkg_count.maxval = len([x for x in self._mergelist \
5959 if isinstance(x, Package) and x.operation == "merge"])
5960 self._status_display.maxval = self._pkg_count.maxval
5962 self._logger.log(" *** Finished. Cleaning up...")
5965 self._failed_pkgs_all.extend(failed_pkgs)
# Failure reporting: with exactly one background failure, dump its whole
# build log; otherwise list the failed packages.
5968 background = self._background
5969 failure_log_shown = False
5970 if background and len(self._failed_pkgs_all) == 1:
5971 # If only one package failed then just show it's
5972 # whole log for easy viewing.
5973 failed_pkg = self._failed_pkgs_all[-1]
5974 build_dir = failed_pkg.build_dir
5977 log_paths = [failed_pkg.build_log]
5979 log_path = self._locate_failure_log(failed_pkg)
5980 if log_path is not None:
5982 log_file = open(log_path)
5986 if log_file is not None:
5988 for line in log_file:
5989 writemsg_level(line, noiselevel=-1)
5992 failure_log_shown = True
5994 # Dump mod_echo output now since it tends to flood the terminal.
5995 # This allows us to avoid having more important output, generated
5996 # later, from being swept away by the mod_echo output.
5997 mod_echo_output = _flush_elog_mod_echo()
5999 if background and not failure_log_shown and \
6000 self._failed_pkgs_all and \
6001 self._failed_pkgs_die_msgs and \
6002 not mod_echo_output:
6004 printer = portage.output.EOutput()
6005 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
6007 if mysettings["ROOT"] != "/":
6008 root_msg = " merged to %s" % mysettings["ROOT"]
6010 printer.einfo("Error messages for package %s%s:" % \
6011 (colorize("INFORM", key), root_msg))
6013 for phase in portage.const.EBUILD_PHASES:
6014 if phase not in logentries:
6016 for msgtype, msgcontent in logentries[phase]:
6017 if isinstance(msgcontent, basestring):
6018 msgcontent = [msgcontent]
6019 for line in msgcontent:
6020 printer.eerror(line.strip("\n"))
6022 if self._post_mod_echo_msgs:
6023 for msg in self._post_mod_echo_msgs:
6026 if len(self._failed_pkgs_all) > 1 or \
6027 (self._failed_pkgs_all and "--keep-going" in self.myopts):
6028 if len(self._failed_pkgs_all) > 1:
6029 msg = "The following %d packages have " % \
6030 len(self._failed_pkgs_all) + \
6031 "failed to build or install:"
6033 msg = "The following package has " + \
6034 "failed to build or install:"
6036 writemsg(prefix + "\n", noiselevel=-1)
6037 from textwrap import wrap
6038 for line in wrap(msg, 72):
6039 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
6040 writemsg(prefix + "\n", noiselevel=-1)
6041 for failed_pkg in self._failed_pkgs_all:
6042 writemsg("%s\t%s\n" % (prefix,
6043 colorize("INFORM", str(failed_pkg.pkg))),
6045 writemsg(prefix + "\n", noiselevel=-1)
# elog hook: capture ERROR-level entries so they can be replayed in the
# end-of-run failure summary.
6049 def _elog_listener(self, mysettings, key, logentries, fulltext):
6050 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
6052 self._failed_pkgs_die_msgs.append(
6053 (mysettings, key, errors))
# Pick a readable, non-empty log file for a failed package.
# NOTE(review): most of this method (lines 6058-6077, including the
# stat error handling and return statements) is elided from this listing.
6055 def _locate_failure_log(self, failed_pkg):
6057 build_dir = failed_pkg.build_dir
6060 log_paths = [failed_pkg.build_log]
6062 for log_path in log_paths:
6067 log_size = os.stat(log_path).st_size
# Seed the scheduling queue with Package entries from the merge list;
# Blocker entries are handled separately (handling elided at ~6084-6085).
6078 def _add_packages(self):
6079 pkg_queue = self._pkg_queue
6080 for pkg in self._mergelist:
6081 if isinstance(pkg, Package):
6082 pkg_queue.append(pkg)
6083 elif isinstance(pkg, Blocker):
6086 def _system_merge_started(self, merge):
6088 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
6090 graph = self._digraph
6093 pkg = merge.merge.pkg
6095 # Skip this if $ROOT != / since it shouldn't matter if there
6096 # are unsatisfied system runtime deps in this case.
6100 completed_tasks = self._completed_tasks
6101 unsatisfied = self._unsatisfied_system_deps
6103 def ignore_non_runtime_or_satisfied(priority):
6105 Ignore non-runtime and satisfied runtime priorities.
# NOTE(review): the `return False`/`return True` lines of this predicate
# appear elided from this listing.
6107 if isinstance(priority, DepPriority) and \
6108 not priority.satisfied and \
6109 (priority.runtime or priority.runtime_post):
6113 # When checking for unsatisfied runtime deps, only check
6114 # direct deps since indirect deps are checked when the
6115 # corresponding parent is merged.
6116 for child in graph.child_nodes(pkg,
6117 ignore_priority=ignore_non_runtime_or_satisfied):
6118 if not isinstance(child, Package) or \
6119 child.operation == 'uninstall':
6123 if child.operation == 'merge' and \
6124 child not in completed_tasks:
6125 unsatisfied.add(child)
# Exit listener for merges that went through the wait queue: drop the
# task from the scheduled list, then run the normal merge-exit handling.
6127 def _merge_wait_exit_handler(self, task):
6128 self._merge_wait_scheduled.remove(task)
6129 self._merge_exit(task)
# Common merge-exit handling: bookkeeping, return the config to the pool,
# and advance the status display on successful non-installed merges.
6131 def _merge_exit(self, merge):
6132 self._do_merge_exit(merge)
6133 self._deallocate_config(merge.merge.settings)
6134 if merge.returncode == os.EX_OK and \
6135 not merge.merge.pkg.installed:
6136 self._status_display.curval += 1
6137 self._status_display.merges = len(self._task_queues.merge)
# Handle a finished merge: record the failure, or mark the task (and any
# replaced package's uninstall) complete, maybe restart emerge, and keep
# the on-disk resume list in sync.
6140 def _do_merge_exit(self, merge):
6141 pkg = merge.merge.pkg
6142 if merge.returncode != os.EX_OK:
6143 settings = merge.merge.settings
6144 build_dir = settings.get("PORTAGE_BUILDDIR")
6145 build_log = settings.get("PORTAGE_LOG_FILE")
6147 self._failed_pkgs.append(self._failed_pkg(
6148 build_dir=build_dir, build_log=build_log,
6150 returncode=merge.returncode))
6151 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
6153 self._status_display.failed = len(self._failed_pkgs)
# Success path (the `return` closing the failure branch is elided).
6156 self._task_complete(pkg)
6157 pkg_to_replace = merge.merge.pkg_to_replace
6158 if pkg_to_replace is not None:
6159 # When a package is replaced, mark it's uninstall
6160 # task complete (if any).
6162 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
6163 self._task_complete(uninst_hash_key)
6168 self._restart_if_necessary(pkg)
6170 # Call mtimedb.commit() after each merge so that
6171 # --resume still works after being interrupted
6172 # by reboot, sigkill or similar.
6173 mtimedb = self._mtimedb
6174 mtimedb["resume"]["mergelist"].remove(list(pkg))
6175 if not mtimedb["resume"]["mergelist"]:
6176 del mtimedb["resume"]
# Handle a finished build: on success queue the merge step (deferred to
# the wait queue for deep system deps), on failure record it.
6179 def _build_exit(self, build):
6180 if build.returncode == os.EX_OK:
6182 merge = PackageMerge(merge=build)
6183 if not build.build_opts.buildpkgonly and \
6184 build.pkg in self._deep_system_deps:
6185 # Since dependencies on system packages are frequently
6186 # unspecified, merge them only when no builds are executing.
6187 self._merge_wait_queue.append(merge)
6188 merge.addStartListener(self._system_merge_started)
6190 merge.addExitListener(self._merge_exit)
6191 self._task_queues.merge.add(merge)
6192 self._status_display.merges = len(self._task_queues.merge)
# Failure path (the `else:` header is elided from this listing).
6194 settings = build.settings
6195 build_dir = settings.get("PORTAGE_BUILDDIR")
6196 build_log = settings.get("PORTAGE_LOG_FILE")
6198 self._failed_pkgs.append(self._failed_pkg(
6199 build_dir=build_dir, build_log=build_log,
6201 returncode=build.returncode))
6202 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
6204 self._status_display.failed = len(self._failed_pkgs)
6205 self._deallocate_config(build.settings)
6207 self._status_display.running = self._jobs
# Binary-package extraction exits are handled exactly like build exits.
6210 def _extract_exit(self, build):
6211 self._build_exit(build)
# Mark pkg done: it may now satisfy system deps, and _choose_pkg may
# succeed again, so reset the early-return memo.
6213 def _task_complete(self, pkg):
6214 self._completed_tasks.add(pkg)
6215 self._unsatisfied_system_deps.discard(pkg)
6216 self._choose_pkg_return_early = False
# --- Body of _merge() -----------------------------------------------------
# NOTE(review): the `def _merge(self):` header (~6218) and several lines
# (the try/finally around the main loop, the success return) are elided.
# Installs the elog listener and lock quieting for the duration of the
# main loop, then derives the exit status from the last failed package.
6220 self._add_prefetchers()
6221 self._add_packages()
6222 pkg_queue = self._pkg_queue
6223 failed_pkgs = self._failed_pkgs
6224 portage.locks._quiet = self._background
6225 portage.elog._emerge_elog_listener = self._elog_listener
6231 self._main_loop_cleanup()
6232 portage.locks._quiet = False
6233 portage.elog._emerge_elog_listener = None
6235 rval = failed_pkgs[-1].returncode
# Reset all per-run scheduling state so the instance can run again
# (e.g. after --keep-going recalculates the merge list).
6239 def _main_loop_cleanup(self):
6240 del self._pkg_queue[:]
6241 self._completed_tasks.clear()
6242 self._deep_system_deps.clear()
6243 self._unsatisfied_system_deps.clear()
6244 self._choose_pkg_return_early = False
6245 self._status_display.reset()
6246 self._digraph = None
6247 self._task_queues.fetch.clear()
6249 def _choose_pkg(self):
6251 Choose a task that has all it's dependencies satisfied.
6254 if self._choose_pkg_return_early:
6257 if self._digraph is None:
6258 if (self._jobs or self._task_queues.merge) and \
6259 not ("--nodeps" in self.myopts and \
6260 (self._max_jobs is True or self._max_jobs > 1)):
6261 self._choose_pkg_return_early = True
6263 return self._pkg_queue.pop(0)
6265 if not (self._jobs or self._task_queues.merge):
6266 return self._pkg_queue.pop(0)
6268 self._prune_digraph()
6271 later = set(self._pkg_queue)
6272 for pkg in self._pkg_queue:
6274 if not self._dependent_on_scheduled_merges(pkg, later):
6278 if chosen_pkg is not None:
6279 self._pkg_queue.remove(chosen_pkg)
6281 if chosen_pkg is None:
6282 # There's no point in searching for a package to
6283 # choose until at least one of the existing jobs
6285 self._choose_pkg_return_early = True
# Depth-first walk of pkg's dependency subgraph looking for any node that
# is still scheduled to merge (i.e. not installed/nomerge, not completed,
# not deferred to "later").
6289 def _dependent_on_scheduled_merges(self, pkg, later):
6291 Traverse the subgraph of the given packages deep dependencies
6292 to see if it contains any scheduled merges.
6293 @param pkg: a package to check dependencies for
6295 @param later: packages for which dependence should be ignored
6296 since they will be merged later than pkg anyway and therefore
6297 delaying the merge of pkg will not result in a more optimal
6301 @returns: True if the package is dependent, False otherwise.
6304 graph = self._digraph
6305 completed_tasks = self._completed_tasks
# NOTE(review): the `dependent = False` initialization and the `while
# node_stack:` loop header appear elided from this listing.
6308 traversed_nodes = set([pkg])
6309 direct_deps = graph.child_nodes(pkg)
6310 node_stack = direct_deps
6311 direct_deps = frozenset(direct_deps)
6313 node = node_stack.pop()
6314 if node in traversed_nodes:
6316 traversed_nodes.add(node)
6317 if not ((node.installed and node.operation == "nomerge") or \
6318 (node.operation == "uninstall" and \
6319 node not in direct_deps) or \
6320 node in completed_tasks or \
6324 node_stack.extend(graph.child_nodes(node))
6328 def _allocate_config(self, root):
6330 Allocate a unique config instance for a task in order
6331 to prevent interference between parallel tasks.
# Reuse a pooled clone when available; otherwise clone the root's
# pkgsettings (the `else:` header is elided from this listing).
6333 if self._config_pool[root]:
6334 temp_settings = self._config_pool[root].pop()
6336 temp_settings = portage.config(clone=self.pkgsettings[root])
6337 # Since config.setcpv() isn't guaranteed to call config.reset() due to
6338 # performance reasons, call it here to make sure all settings from the
6339 # previous package get flushed out (such as PORTAGE_LOG_FILE).
6340 temp_settings.reload()
6341 temp_settings.reset()
6342 return temp_settings
# Return a config clone to the per-root pool for reuse.
6344 def _deallocate_config(self, settings):
6345 self._config_pool[settings["ROOT"]].append(settings)
# Main event loop: keep scheduling tasks and polling event handlers until
# nothing is left to schedule and all jobs/merges have drained.
6347 def _main_loop(self):
6349 # Only allow 1 job max if a restart is scheduled
6350 # due to portage update.
6351 if self._is_restart_scheduled() or \
6352 self._opts_no_background.intersection(self.myopts):
6353 self._set_max_jobs(1)
6355 merge_queue = self._task_queues.merge
6357 while self._schedule():
6358 if self._poll_event_handlers:
# NOTE(review): the poll/loop bodies (6359-6362, 6364, 6366) are elided
# from this listing.
6363 if not (self._jobs or merge_queue):
6365 if self._poll_event_handlers:
# Keep going while the queue is non-empty, unless a failure occurred in a
# non-fetchonly run.
6368 def _keep_scheduling(self):
6369 return bool(self._pkg_queue and \
6370 not (self._failed_pkgs and not self._build_opts.fetchonly))
# One scheduling pass: flush the merge-wait queue when idle, start new
# tasks, refresh the display, and cancel now-pointless prefetchers.
6372 def _schedule_tasks(self):
6374 # When the number of jobs drops to zero, process all waiting merges.
6375 if not self._jobs and self._merge_wait_queue:
6376 for task in self._merge_wait_queue:
6377 task.addExitListener(self._merge_wait_exit_handler)
6378 self._task_queues.merge.add(task)
6379 self._status_display.merges = len(self._task_queues.merge)
6380 self._merge_wait_scheduled.extend(self._merge_wait_queue)
6381 del self._merge_wait_queue[:]
6383 self._schedule_tasks_imp()
6384 self._status_display.display()
6387 for q in self._task_queues.values():
6391 # Cancel prefetchers if they're the only reason
6392 # the main poll loop is still running.
6393 if self._failed_pkgs and not self._build_opts.fetchonly and \
6394 not (self._jobs or self._task_queues.merge) and \
6395 self._task_queues.fetch:
6396 self._task_queues.fetch.clear()
# Second pass after state changed (surrounding loop lines are elided
# from this listing).
6400 self._schedule_tasks_imp()
6401 self._status_display.display()
6403 return self._keep_scheduling()
# Throttle job starts when a load limit is configured: the delay grows
# polynomially with the number of running jobs (factor * jobs ** exp),
# capped at _job_delay_max, measured from the previous job start time.
6405 def _job_delay(self):
6408 @returns: True if job scheduling should be delayed, False otherwise.
6411 if self._jobs and self._max_load is not None:
6413 current_time = time.time()
6415 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
6416 if delay > self._job_delay_max:
6417 delay = self._job_delay_max
6418 if (current_time - self._previous_job_start_time) < delay:
# Inner scheduling step: pick the next package with _choose_pkg() and
# start either a PackageMerge (for ready merges) or a build/extract job,
# wiring up the appropriate exit listener for each queue.
6423 def _schedule_tasks_imp(self):
6426 @returns: True if state changed, False otherwise.
6433 if not self._keep_scheduling():
6434 return bool(state_change)
# Bail out early when a merge is already scheduled, system deps are
# unsatisfied while jobs run, or job limits forbid adding more work.
6436 if self._choose_pkg_return_early or \
6437 self._merge_wait_scheduled or \
6438 (self._jobs and self._unsatisfied_system_deps) or \
6439 not self._can_add_job() or \
6441 return bool(state_change)
6443 pkg = self._choose_pkg()
6445 return bool(state_change)
6449 if not pkg.installed:
6450 self._pkg_count.curval += 1
6452 task = self._task(pkg)
6455 merge = PackageMerge(merge=task)
6456 merge.addExitListener(self._merge_exit)
6457 self._task_queues.merge.add(merge)
# Binary-package extraction path (per the _extract_exit listener).
6461 self._previous_job_start_time = time.time()
6462 self._status_display.running = self._jobs
6463 task.addExitListener(self._extract_exit)
6464 self._task_queues.jobs.add(task)
# Normal build path (per the _build_exit listener).
6468 self._previous_job_start_time = time.time()
6469 self._status_display.running = self._jobs
6470 task.addExitListener(self._build_exit)
6471 self._task_queues.jobs.add(task)
6473 return bool(state_change)
# Build a MergeListItem task for pkg. For non-uninstall operations the
# installed package occupying the same slot (if any) is looked up in the
# vartree so it can be recorded as pkg_to_replace.
6475 def _task(self, pkg):
6477 pkg_to_replace = None
6478 if pkg.operation != "uninstall":
6479 vardb = pkg.root_config.trees["vartree"].dbapi
6480 previous_cpv = vardb.match(pkg.slot_atom)
# pop() takes the single slot match; the guard line is elided here.
6482 previous_cpv = previous_cpv.pop()
6483 pkg_to_replace = self._pkg(previous_cpv,
6484 "installed", pkg.root_config, installed=True)
6486 task = MergeListItem(args_set=self._args_set,
6487 background=self._background, binpkg_opts=self._binpkg_opts,
6488 build_opts=self._build_opts,
6489 config_pool=self._ConfigPool(pkg.root,
6490 self._allocate_config, self._deallocate_config),
6491 emerge_opts=self.myopts,
6492 find_blockers=self._find_blockers(pkg), logger=self._logger,
6493 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
6494 pkg_to_replace=pkg_to_replace,
6495 prefetcher=self._prefetchers.get(pkg),
6496 scheduler=self._sched_iface,
6497 settings=self._allocate_config(pkg.root),
6498 statusMessage=self._status_msg,
6499 world_atom=self._world_atom)
# Report a failed package through the status display, mentioning the
# package's root and the failure log path when one can be located.
6503 def _failed_pkg_msg(self, failed_pkg, action, preposition):
6504 pkg = failed_pkg.pkg
6505 msg = "%s to %s %s" % \
6506 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
6508 msg += " %s %s" % (preposition, pkg.root)
6510 log_path = self._locate_failure_log(failed_pkg)
6511 if log_path is not None:
6512 msg += ", Log file:"
6513 self._status_msg(msg)
# The log path is emitted as a separate, indented status line.
6515 if log_path is not None:
6516 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# Route a one-line status message through the status display, which
# owns all \r/\n cursor control for the bottom status bar.
6518 def _status_msg(self, msg):
6520 Display a brief status message (no newlines) in the status display.
6521 This is called by tasks to provide feedback to the user. This
6522 delegates the responsibility of generating \r and \n control characters,
6523 to guarantee that lines are created or erased when necessary and
6527 @param msg: a brief status message (no newlines allowed)
6529 if not self._background:
6530 writemsg_level("\n")
6531 self._status_display.displayMessage(msg)
# Persist the pending merge list into mtimedb["resume"] so an
# interrupted emerge can be continued with --resume.
6533 def _save_resume_list(self):
6535 Do this before verifying the ebuild Manifests since it might
6536 be possible for the user to use --resume --skipfirst get past
6537 a non-essential package with a broken digest.
6539 mtimedb = self._mtimedb
# Only actual merges are saved; uninstalls and nomerge nodes are not
# part of the resume list.
6540 mtimedb["resume"]["mergelist"] = [list(x) \
6541 for x in self._mergelist \
6542 if isinstance(x, Package) and x.operation == "merge"]
# Recompute the resume merge list via resume_depgraph(), dropping any
# packages whose dependencies can no longer be satisfied. Dropped merge
# tasks are logged via elog and recorded in _failed_pkgs_all.
6546 def _calc_resume_list(self):
6548 Use the current resume list to calculate a new one,
6549 dropping any packages with unsatisfied deps.
6551 @returns: True if successful, False otherwise.
6553 print colorize("GOOD", "*** Resuming merge...")
6555 if self._show_list():
6556 if "--tree" in self.myopts:
6557 portage.writemsg_stdout("\n" + \
6558 darkgreen("These are the packages that " + \
6559 "would be merged, in reverse order:\n\n"))
6562 portage.writemsg_stdout("\n" + \
6563 darkgreen("These are the packages that " + \
6564 "would be merged, in order:\n\n"))
6566 show_spinner = "--quiet" not in self.myopts and \
6567 "--nodeps" not in self.myopts
6570 print "Calculating dependencies ",
6572 myparams = create_depgraph_params(self.myopts, None)
6576 success, mydepgraph, dropped_tasks = resume_depgraph(
6577 self.settings, self.trees, self._mtimedb, self.myopts,
6578 myparams, self._spinner)
6579 except depgraph.UnsatisfiedResumeDep, exc:
6580 # rename variable to avoid python-3.0 error:
6581 # SyntaxError: can not delete variable 'e' referenced in nested
6584 mydepgraph = e.depgraph
6585 dropped_tasks = set()
6588 print "\b\b... done!"
# Deferred error report, shown after mod_echo output is flushed.
6591 def unsatisfied_resume_dep_msg():
6592 mydepgraph.display_problems()
6593 out = portage.output.EOutput()
6594 out.eerror("One or more packages are either masked or " + \
6595 "have missing dependencies:")
6598 show_parents = set()
# Report each parent package only once, however many deps it has.
6600 if dep.parent in show_parents:
6602 show_parents.add(dep.parent)
6603 if dep.atom is None:
6604 out.eerror(indent + "Masked package:")
6605 out.eerror(2 * indent + str(dep.parent))
6608 out.eerror(indent + str(dep.atom) + " pulled in by:")
6609 out.eerror(2 * indent + str(dep.parent))
6611 msg = "The resume list contains packages " + \
6612 "that are either masked or have " + \
6613 "unsatisfied dependencies. " + \
6614 "Please restart/continue " + \
6615 "the operation manually, or use --skipfirst " + \
6616 "to skip the first package in the list and " + \
6617 "any other packages that may be " + \
6618 "masked or have missing dependencies."
6619 for line in textwrap.wrap(msg, 72):
6621 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
6624 if success and self._show_list():
6625 mylist = mydepgraph.altlist()
6627 if "--tree" in self.myopts:
6629 mydepgraph.display(mylist, favorites=self._favorites)
6632 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
6634 mydepgraph.display_problems()
6636 mylist = mydepgraph.altlist()
# break_refs() releases depgraph-internal references so the graph can
# be garbage collected while the plain task list lives on.
6637 mydepgraph.break_refs(mylist)
6638 mydepgraph.break_refs(dropped_tasks)
6639 self._mergelist = mylist
6640 self._set_digraph(mydepgraph.schedulerGraph())
6643 for task in dropped_tasks:
6644 if not (isinstance(task, Package) and task.operation == "merge"):
6647 msg = "emerge --keep-going:" + \
6650 msg += " for %s" % (pkg.root,)
6651 msg += " dropped due to unsatisfied dependency."
6652 for line in textwrap.wrap(msg, msg_width):
6653 eerror(line, phase="other", key=pkg.cpv)
6654 settings = self.pkgsettings[pkg.root]
6655 # Ensure that log collection from $T is disabled inside
6656 # elog_process(), since any logs that might exist are
6658 settings.pop("T", None)
6659 portage.elog.elog_process(pkg.cpv, settings)
6660 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Whether the merge list should be printed: yes when not --quiet and
# any of --ask/--tree/--verbose is given.
6664 def _show_list(self):
6665 myopts = self.myopts
6666 if "--quiet" not in myopts and \
6667 ("--ask" in myopts or "--tree" in myopts or \
6668 "--verbose" in myopts):
# Record pkg in the "world" favorites set when appropriate: skipped for
# pretend/fetch-only style runs, non-target roots, and packages that no
# command-line argument selected.
6672 def _world_atom(self, pkg):
6674 Add the package to the world file, but only if
6675 it's supposed to be added. Otherwise, do nothing.
6678 if set(("--buildpkgonly", "--fetchonly",
6680 "--oneshot", "--onlydeps",
6681 "--pretend")).intersection(self.myopts):
6684 if pkg.root != self.target_root:
6687 args_set = self._args_set
6688 if not args_set.findAtomForPackage(pkg):
6691 logger = self._logger
6692 pkg_count = self._pkg_count
6693 root_config = pkg.root_config
6694 world_set = root_config.sets["world"]
6695 world_locked = False
# lock/load are optional on set implementations, hence the hasattr
# probes before each call.
6696 if hasattr(world_set, "lock"):
6701 if hasattr(world_set, "load"):
6702 world_set.load() # maybe it's changed on disk
6704 atom = create_world_atom(pkg, args_set, root_config)
6706 if hasattr(world_set, "add"):
6707 self._status_msg(('Recording %s in "world" ' + \
6708 'favorites file...') % atom)
6709 logger.log(" === (%s of %s) Updating world file (%s)" % \
6710 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
6713 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
6714 (atom,), level=logging.WARN, noiselevel=-1)
# Fetch a Package instance, preferring a cached node from the digraph;
# otherwise construct one from the appropriate dbapi's aux metadata.
# For ebuilds, USE and CHOST are refreshed from the current settings.
6719 def _pkg(self, cpv, type_name, root_config, installed=False):
6721 Get a package instance from the cache, or create a new
6722 one if necessary. Raises KeyError from aux_get if it
6723 failures for some reason (package does not exist or is
6728 operation = "nomerge"
6730 if self._digraph is not None:
6731 # Reuse existing instance when available.
6732 pkg = self._digraph.get(
6733 (type_name, root_config.root, cpv, operation))
6737 tree_type = depgraph.pkg_tree_map[type_name]
6738 db = root_config.trees[tree_type].dbapi
6739 db_keys = list(self.trees[root_config.root][
6740 tree_type].dbapi._aux_cache_keys)
# izip pairs keys with aux_get values lazily (Python 2 idiom).
6741 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6742 pkg = Package(cpv=cpv, metadata=metadata,
6743 root_config=root_config, installed=installed)
6744 if type_name == "ebuild":
6745 settings = self.pkgsettings[root_config.root]
6746 settings.setcpv(pkg)
6747 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6748 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# Regenerate the GNU info directory index ("dir" files) for any info
# directory whose mtime changed since the times recorded in prev_mtimes.
# Uses /usr/bin/install-info per file; counts processed files and errors.
6752 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
6754 if os.path.exists("/usr/bin/install-info"):
6755 out = portage.output.EOutput()
6760 inforoot=normpath(root+z)
6761 if os.path.isdir(inforoot):
6762 infomtime = long(os.stat(inforoot).st_mtime)
6763 if inforoot not in prev_mtimes or \
6764 prev_mtimes[inforoot] != infomtime:
6765 regen_infodirs.append(inforoot)
6767 if not regen_infodirs:
6768 portage.writemsg_stdout("\n")
6769 out.einfo("GNU info directory index is up-to-date.")
6771 portage.writemsg_stdout("\n")
6772 out.einfo("Regenerating GNU info directory index...")
# The dir index may be stored plain or compressed.
6774 dir_extensions = ("", ".gz", ".bz2")
6778 for inforoot in regen_infodirs:
6782 if not os.path.isdir(inforoot) or \
6783 not os.access(inforoot, os.W_OK):
6786 file_list = os.listdir(inforoot)
6788 dir_file = os.path.join(inforoot, "dir")
6789 moved_old_dir = False
6792 if x.startswith(".") or \
6793 os.path.isdir(os.path.join(inforoot, x)):
6795 if x.startswith("dir"):
6797 for ext in dir_extensions:
6798 if x == "dir" + ext or \
6799 x == "dir" + ext + ".old":
# Before the first real info file is processed, move any existing dir
# index aside so install-info rebuilds it from scratch.
6804 if processed_count == 0:
6805 for ext in dir_extensions:
6807 os.rename(dir_file + ext, dir_file + ext + ".old")
6808 moved_old_dir = True
6809 except EnvironmentError, e:
6810 if e.errno != errno.ENOENT:
6813 processed_count += 1
6814 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
6815 existsstr="already exists, for file `"
6817 if re.search(existsstr,myso):
6818 # Already exists... Don't increment the count for this.
6820 elif myso[:44]=="install-info: warning: no info dir entry in ":
6821 # This info file doesn't contain a DIR-header: install-info produces this
6822 # (harmless) warning (the --quiet switch doesn't seem to work).
6823 # Don't increment the count for this.
6827 errmsg += myso + "\n"
6830 if moved_old_dir and not os.path.exists(dir_file):
6831 # We didn't generate a new dir file, so put the old file
6832 # back where it was originally found.
6833 for ext in dir_extensions:
6835 os.rename(dir_file + ext + ".old", dir_file + ext)
6836 except EnvironmentError, e:
6837 if e.errno != errno.ENOENT:
6841 # Clean dir.old cruft so that they don't prevent
6842 # unmerge of otherwise empty directories.
6843 for ext in dir_extensions:
6845 os.unlink(dir_file + ext + ".old")
6846 except EnvironmentError, e:
6847 if e.errno != errno.ENOENT:
6851 #update mtime so we can potentially avoid regenerating.
6852 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
6855 out.eerror("Processed %d info files; %d errors." % \
6857 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
6860 out.einfo("Processed %d info files." % (icount,))
# Print a notification for each repository that has unread GNU-style
# news items; unread counts come from checkUpdatedNewsItems(). The
# unread state is only updated when not running with --pretend.
6863 def display_news_notification(root_config, myopts):
6864 target_root = root_config.root
6865 trees = root_config.trees
6866 settings = trees["vartree"].settings
6867 portdb = trees["porttree"].dbapi
6868 vardb = trees["vartree"].dbapi
6869 NEWS_PATH = os.path.join("metadata", "news")
6870 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
6871 newsReaderDisplay = False
6872 update = "--pretend" not in myopts
6874 for repo in portdb.getRepositories():
6875 unreadItems = checkUpdatedNewsItems(
6876 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
6878 if not newsReaderDisplay:
6879 newsReaderDisplay = True
6881 print colorize("WARN", " * IMPORTANT:"),
6882 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
# Footer hint, printed once if anything was reported above.
6885 if newsReaderDisplay:
6886 print colorize("WARN", " *"),
6887 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Summarize preserved libraries from the vardb plib registry: for each
# owning package, list the preserved files (grouped by identical inode
# via samefile_map) and up to MAX_DISPLAY of their consumers with the
# packages that own those consumers.
6890 def display_preserved_libs(vardbapi):
6893 # Ensure the registry is consistent with existing files.
6894 vardbapi.plib_registry.pruneNonExisting()
6896 if vardbapi.plib_registry.hasEntries():
6898 print colorize("WARN", "!!!") + " existing preserved libs:"
6899 plibdata = vardbapi.plib_registry.getPreservedLibs()
6900 linkmap = vardbapi.linkmap
6903 linkmap_broken = False
6907 except portage.exception.CommandNotFound, e:
6908 writemsg_level("!!! Command Not Found: %s\n" % (e,),
6909 level=logging.ERROR, noiselevel=-1)
# Without a working linkmap, consumer lookups are skipped below.
6911 linkmap_broken = True
6913 search_for_owners = set()
6914 for cpv in plibdata:
6915 internal_plib_keys = set(linkmap._obj_key(f) \
6916 for f in plibdata[cpv])
6917 for f in plibdata[cpv]:
6918 if f in consumer_map:
6921 for c in linkmap.findConsumers(f):
6922 # Filter out any consumers that are also preserved libs
6923 # belonging to the same package as the provider.
6924 if linkmap._obj_key(c) not in internal_plib_keys:
6927 consumer_map[f] = consumers
# MAX_DISPLAY+1 so "used by N other files" can be detected later.
6928 search_for_owners.update(consumers[:MAX_DISPLAY+1])
6930 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
6932 for cpv in plibdata:
6933 print colorize("WARN", ">>>") + " package: %s" % cpv
6935 for f in plibdata[cpv]:
6936 obj_key = linkmap._obj_key(f)
6937 alt_paths = samefile_map.get(obj_key)
6938 if alt_paths is None:
6940 samefile_map[obj_key] = alt_paths
6943 for alt_paths in samefile_map.itervalues():
6944 alt_paths = sorted(alt_paths)
6946 print colorize("WARN", " * ") + " - %s" % (p,)
6948 consumers = consumer_map.get(f, [])
6949 for c in consumers[:MAX_DISPLAY]:
6950 print colorize("WARN", " * ") + " used by %s (%s)" % \
6951 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
6952 if len(consumers) == MAX_DISPLAY + 1:
6953 print colorize("WARN", " * ") + " used by %s (%s)" % \
6954 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
6955 for x in owners.get(consumers[MAX_DISPLAY], [])))
6956 elif len(consumers) > MAX_DISPLAY:
6957 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
6958 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
# Flush queued elog mod_echo output early so later notifications (news,
# preserved libs) appear last on the terminal.
6961 def _flush_elog_mod_echo():
6963 Dump the mod_echo output now so that our other
6964 notifications are shown last.
6966 @returns: True if messages were shown, False otherwise.
6968 messages_shown = False
6970 from portage.elog import mod_echo
# ImportError is tolerated: can occur during a downgrade to a portage
# version that lacks the module.
6972 pass # happens during downgrade to a version without the module
6974 messages_shown = bool(mod_echo._items)
6976 return messages_shown
# End-of-merge-session housekeeping: regenerate settings from
# /etc/profile.env, log the exit status, flush elog output, refresh the
# info index, check config-protect files, and show news/preserved-libs
# notices. Terminates the process via sys.exit(retval).
6978 def post_emerge(root_config, myopts, mtimedb, retval):
6980 Misc. things to run at the end of a merge session.
6986 Display preserved libs warnings
6989 @param trees: A dictionary mapping each ROOT to it's package databases
6991 @param mtimedb: The mtimeDB to store data needed across merge invocations
6992 @type mtimedb: MtimeDB class instance
6993 @param retval: Emerge's return value
6997 1. Calls sys.exit(retval)
7000 target_root = root_config.root
7001 trees = { target_root : root_config.trees }
7002 vardbapi = trees[target_root]["vartree"].dbapi
7003 settings = vardbapi.settings
7004 info_mtimes = mtimedb["info"]
7006 # Load the most current variables from ${ROOT}/etc/profile.env
7009 settings.regenerate()
7012 config_protect = settings.get("CONFIG_PROTECT","").split()
7013 infodirs = settings.get("INFOPATH","").split(":") + \
7014 settings.get("INFODIR","").split(":")
7018 if retval == os.EX_OK:
7019 exit_msg = " *** exiting successfully."
7021 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
7022 emergelog("notitles" not in settings.features, exit_msg)
7024 _flush_elog_mod_echo()
# A matching counter hash means the vdb was not modified, so the
# remaining vdb-dependent steps can be skipped.
7026 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
7027 if "--pretend" in myopts or (counter_hash is not None and \
7028 counter_hash == vardbapi._counter_hash()):
7029 display_news_notification(root_config, myopts)
7030 # If vdb state has not changed then there's nothing else to do.
7033 vdb_path = os.path.join(target_root, portage.VDB_PATH)
7034 portage.util.ensure_dirs(vdb_path)
7036 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
7037 vdb_lock = portage.locks.lockdir(vdb_path)
7041 if "noinfo" not in settings.features:
7042 chk_updated_info_files(target_root,
7043 infodirs, info_mtimes, retval)
7047 portage.locks.unlockdir(vdb_lock)
7049 chk_updated_cfg_files(target_root, config_protect)
7051 display_news_notification(root_config, myopts)
7052 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
7053 display_preserved_libs(vardbapi)
# Scan CONFIG_PROTECT paths for pending ._cfg????_* update files (via
# find) and tell the user how many config files need updating.
7058 def chk_updated_cfg_files(target_root, config_protect):
7060 #number of directories with some protect files in them
7062 for x in config_protect:
7063 x = os.path.join(target_root, x.lstrip(os.path.sep))
7064 if not os.access(x, os.W_OK):
7065 # Avoid Permission denied errors generated
7069 mymode = os.lstat(x).st_mode
7072 if stat.S_ISLNK(mymode):
7073 # We want to treat it like a directory if it
7074 # is a symlink to an existing directory.
7076 real_mode = os.stat(x).st_mode
7077 if stat.S_ISDIR(real_mode):
# Directories get a recursive find; single files a -maxdepth 1 probe.
7081 if stat.S_ISDIR(mymode):
7082 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
7084 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
7085 os.path.split(x.rstrip(os.path.sep))
7086 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
7087 a = commands.getstatusoutput(mycommand)
7089 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
7091 # Show the error message alone, sending stdout to /dev/null.
7092 os.system(mycommand + " 1>/dev/null")
# -print0 output is NUL-delimited; the trailing empty entry from
# split is discarded below.
7094 files = a[1].split('\0')
7095 # split always produces an empty string as the last element
7096 if files and not files[-1]:
7100 print "\n"+colorize("WARN", " * IMPORTANT:"),
7101 if stat.S_ISDIR(mymode):
7102 print "%d config files in '%s' need updating." % \
7105 print "config file '%s' needs updating." % x
7108 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
7109 " section of the " + bold("emerge")
7110 print " "+yellow("*")+" man page to learn how to update config files."
# Thin wrapper around portage.news.NewsManager: count unread news items
# for one repository, optionally updating the unread state on disk.
7112 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
7115 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
7116 Returns the number of unread (yet relevant) items.
7118 @param portdb: a portage tree database
7119 @type portdb: pordbapi
7120 @param vardb: an installed package database
7121 @type vardb: vardbapi
7130 1. The number of unread but relevant news items.
7133 from portage.news import NewsManager
7134 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
7135 return manager.getUnreadItems( repo_id, update=update )
# Insert "category/" immediately before the first word character of an
# atom, preserving any leading operator characters (e.g. ">=", "=").
7137 def insert_category_into_atom(atom, category):
7138 alphanum = re.search(r'\w', atom)
7140 ret = atom[:alphanum.start()] + "%s/" % category + \
7141 atom[alphanum.start():]
# Validate a possibly category-less atom by injecting a dummy "cat/"
# category before the first word character, then deferring to
# portage.isvalidatom().
7146 def is_valid_package_atom(x):
7148 alphanum = re.search(r'\w', x)
7150 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
7151 return portage.isvalidatom(x)
# Point the user at the Gentoo Handbook section explaining blocked
# packages.
7153 def show_blocker_docs_link():
7155 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
7156 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
7158 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Point the user at the MASKED PACKAGES documentation.
7161 def show_mask_docs():
7162 print "For more information, see the MASKED PACKAGES section in the emerge"
7163 print "man page or refer to the Gentoo Handbook."
7165 def action_sync(settings, trees, mtimedb, myopts, myaction):
7166 xterm_titles = "notitles" not in settings.features
7167 emergelog(xterm_titles, " === sync")
7168 portdb = trees[settings["ROOT"]]["porttree"].dbapi
7169 myportdir = portdb.porttree_root
7170 out = portage.output.EOutput()
7172 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
7174 if myportdir[-1]=="/":
7175 myportdir=myportdir[:-1]
7177 st = os.stat(myportdir)
7181 print ">>>",myportdir,"not found, creating it."
7182 os.makedirs(myportdir,0755)
7183 st = os.stat(myportdir)
7186 spawn_kwargs["env"] = settings.environ()
7187 if 'usersync' in settings.features and \
7188 portage.data.secpass >= 2 and \
7189 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
7190 st.st_gid != os.getgid() and st.st_mode & 0070):
7192 homedir = pwd.getpwuid(st.st_uid).pw_dir
7196 # Drop privileges when syncing, in order to match
7197 # existing uid/gid settings.
7198 spawn_kwargs["uid"] = st.st_uid
7199 spawn_kwargs["gid"] = st.st_gid
7200 spawn_kwargs["groups"] = [st.st_gid]
7201 spawn_kwargs["env"]["HOME"] = homedir
7203 if not st.st_mode & 0020:
7204 umask = umask | 0020
7205 spawn_kwargs["umask"] = umask
7207 syncuri = settings.get("SYNC", "").strip()
7209 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
7210 noiselevel=-1, level=logging.ERROR)
7213 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
7214 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
7218 updatecache_flg = False
7219 if myaction == "metadata":
7220 print "skipping sync"
7221 updatecache_flg = True
7222 elif ".git" in vcs_dirs:
7223 # Update existing git repository, and ignore the syncuri. We are
7224 # going to trust the user and assume that the user is in the branch
7225 # that he/she wants updated. We'll let the user manage branches with
7227 if portage.process.find_binary("git") is None:
7228 msg = ["Command not found: git",
7229 "Type \"emerge dev-util/git\" to enable git support."]
7231 writemsg_level("!!! %s\n" % l,
7232 level=logging.ERROR, noiselevel=-1)
7234 msg = ">>> Starting git pull in %s..." % myportdir
7235 emergelog(xterm_titles, msg )
7236 writemsg_level(msg + "\n")
7237 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
7238 (portage._shell_quote(myportdir),), **spawn_kwargs)
7239 if exitcode != os.EX_OK:
7240 msg = "!!! git pull error in %s." % myportdir
7241 emergelog(xterm_titles, msg)
7242 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
7244 msg = ">>> Git pull in %s successful" % myportdir
7245 emergelog(xterm_titles, msg)
7246 writemsg_level(msg + "\n")
7247 exitcode = git_sync_timestamps(settings, myportdir)
7248 if exitcode == os.EX_OK:
7249 updatecache_flg = True
7250 elif syncuri[:8]=="rsync://":
7251 for vcs_dir in vcs_dirs:
7252 writemsg_level(("!!! %s appears to be under revision " + \
7253 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
7254 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
7256 if not os.path.exists("/usr/bin/rsync"):
7257 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
7258 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
7263 if settings["PORTAGE_RSYNC_OPTS"] == "":
7264 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
7266 "--recursive", # Recurse directories
7267 "--links", # Consider symlinks
7268 "--safe-links", # Ignore links outside of tree
7269 "--perms", # Preserve permissions
7270 "--times", # Preserive mod times
7271 "--compress", # Compress the data transmitted
7272 "--force", # Force deletion on non-empty dirs
7273 "--whole-file", # Don't do block transfers, only entire files
7274 "--delete", # Delete files that aren't in the master tree
7275 "--stats", # Show final statistics about what was transfered
7276 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
7277 "--exclude=/distfiles", # Exclude distfiles from consideration
7278 "--exclude=/local", # Exclude local from consideration
7279 "--exclude=/packages", # Exclude packages from consideration
7283 # The below validation is not needed when using the above hardcoded
7286 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
7288 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
7289 for opt in ("--recursive", "--times"):
7290 if opt not in rsync_opts:
7291 portage.writemsg(yellow("WARNING:") + " adding required option " + \
7292 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
7293 rsync_opts.append(opt)
7295 for exclude in ("distfiles", "local", "packages"):
7296 opt = "--exclude=/%s" % exclude
7297 if opt not in rsync_opts:
7298 portage.writemsg(yellow("WARNING:") + \
7299 " adding required option %s not included in " % opt + \
7300 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
7301 rsync_opts.append(opt)
7303 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
7304 def rsync_opt_startswith(opt_prefix):
7305 for x in rsync_opts:
7306 if x.startswith(opt_prefix):
7310 if not rsync_opt_startswith("--timeout="):
7311 rsync_opts.append("--timeout=%d" % mytimeout)
7313 for opt in ("--compress", "--whole-file"):
7314 if opt not in rsync_opts:
7315 portage.writemsg(yellow("WARNING:") + " adding required option " + \
7316 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
7317 rsync_opts.append(opt)
7319 if "--quiet" in myopts:
7320 rsync_opts.append("--quiet") # Shut up a lot
7322 rsync_opts.append("--verbose") # Print filelist
7324 if "--verbose" in myopts:
7325 rsync_opts.append("--progress") # Progress meter for each file
7327 if "--debug" in myopts:
7328 rsync_opts.append("--checksum") # Force checksum on all files
7330 # Real local timestamp file.
7331 servertimestampfile = os.path.join(
7332 myportdir, "metadata", "timestamp.chk")
7334 content = portage.util.grabfile(servertimestampfile)
7338 mytimestamp = time.mktime(time.strptime(content[0],
7339 "%a, %d %b %Y %H:%M:%S +0000"))
7340 except (OverflowError, ValueError):
7345 rsync_initial_timeout = \
7346 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
7348 rsync_initial_timeout = 15
7351 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
7352 except SystemExit, e:
7353 raise # Needed else can't exit
7355 maxretries=3 #default number of retries
7358 user_name, hostname, port = re.split(
7359 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
7362 if user_name is None:
7364 updatecache_flg=True
7365 all_rsync_opts = set(rsync_opts)
7366 extra_rsync_opts = shlex.split(
7367 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
7368 all_rsync_opts.update(extra_rsync_opts)
7369 family = socket.AF_INET
7370 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
7371 family = socket.AF_INET
7372 elif socket.has_ipv6 and \
7373 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
7374 family = socket.AF_INET6
7376 SERVER_OUT_OF_DATE = -1
7377 EXCEEDED_MAX_RETRIES = -2
7383 for addrinfo in socket.getaddrinfo(
7384 hostname, None, family, socket.SOCK_STREAM):
7385 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
7386 # IPv6 addresses need to be enclosed in square brackets
7387 ips.append("[%s]" % addrinfo[4][0])
7389 ips.append(addrinfo[4][0])
7390 from random import shuffle
7392 except SystemExit, e:
7393 raise # Needed else can't exit
7394 except Exception, e:
7395 print "Notice:",str(e)
7400 dosyncuri = syncuri.replace(
7401 "//" + user_name + hostname + port + "/",
7402 "//" + user_name + ips[0] + port + "/", 1)
7403 except SystemExit, e:
7404 raise # Needed else can't exit
7405 except Exception, e:
7406 print "Notice:",str(e)
7410 if "--ask" in myopts:
7411 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
7416 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
7417 if "--quiet" not in myopts:
7418 print ">>> Starting rsync with "+dosyncuri+"..."
7420 emergelog(xterm_titles,
7421 ">>> Starting retry %d of %d with %s" % \
7422 (retries,maxretries,dosyncuri))
7423 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
7425 if mytimestamp != 0 and "--quiet" not in myopts:
7426 print ">>> Checking server timestamp ..."
7428 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
7430 if "--debug" in myopts:
7435 # Even if there's no timestamp available locally, fetch the
7436 # timestamp anyway as an initial probe to verify that the server is
7437 # responsive. This protects us from hanging indefinitely on a
7438 # connection attempt to an unresponsive server which rsync's
7439 # --timeout option does not prevent.
7441 # Temporary file for remote server timestamp comparison.
7442 from tempfile import mkstemp
7443 fd, tmpservertimestampfile = mkstemp()
7445 mycommand = rsynccommand[:]
7446 mycommand.append(dosyncuri.rstrip("/") + \
7447 "/metadata/timestamp.chk")
7448 mycommand.append(tmpservertimestampfile)
7452 def timeout_handler(signum, frame):
7453 raise portage.exception.PortageException("timed out")
7454 signal.signal(signal.SIGALRM, timeout_handler)
7455 # Timeout here in case the server is unresponsive. The
7456 # --timeout rsync option doesn't apply to the initial
7457 # connection attempt.
7458 if rsync_initial_timeout:
7459 signal.alarm(rsync_initial_timeout)
7461 mypids.extend(portage.process.spawn(
7462 mycommand, env=settings.environ(), returnpid=True))
7463 exitcode = os.waitpid(mypids[0], 0)[1]
7464 content = portage.grabfile(tmpservertimestampfile)
7466 if rsync_initial_timeout:
7469 os.unlink(tmpservertimestampfile)
7472 except portage.exception.PortageException, e:
7476 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
7477 os.kill(mypids[0], signal.SIGTERM)
7478 os.waitpid(mypids[0], 0)
7479 # This is the same code rsync uses for timeout.
7482 if exitcode != os.EX_OK:
7484 exitcode = (exitcode & 0xff) << 8
7486 exitcode = exitcode >> 8
7488 portage.process.spawned_pids.remove(mypids[0])
7491 servertimestamp = time.mktime(time.strptime(
7492 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
7493 except (OverflowError, ValueError):
7495 del mycommand, mypids, content
7496 if exitcode == os.EX_OK:
7497 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
7498 emergelog(xterm_titles,
7499 ">>> Cancelling sync -- Already current.")
7502 print ">>> Timestamps on the server and in the local repository are the same."
7503 print ">>> Cancelling all further sync action. You are already up to date."
7505 print ">>> In order to force sync, remove '%s'." % servertimestampfile
7509 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
7510 emergelog(xterm_titles,
7511 ">>> Server out of date: %s" % dosyncuri)
7514 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
7516 print ">>> In order to force sync, remove '%s'." % servertimestampfile
7519 exitcode = SERVER_OUT_OF_DATE
7520 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
7522 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
7523 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
7524 if exitcode in [0,1,3,4,11,14,20,21]:
7526 elif exitcode in [1,3,4,11,14,20,21]:
7529 # Code 2 indicates protocol incompatibility, which is expected
7530 # for servers with protocol < 29 that don't support
7531 # --prune-empty-directories. Retry for a server that supports
7532 # at least rsync protocol version 29 (>=rsync-2.6.4).
7537 if retries<=maxretries:
7538 print ">>> Retrying..."
7543 updatecache_flg=False
7544 exitcode = EXCEEDED_MAX_RETRIES
7548 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
7549 elif exitcode == SERVER_OUT_OF_DATE:
7551 elif exitcode == EXCEEDED_MAX_RETRIES:
7553 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
7558 msg.append("Rsync has reported that there is a syntax error. Please ensure")
7559 msg.append("that your SYNC statement is proper.")
7560 msg.append("SYNC=" + settings["SYNC"])
7562 msg.append("Rsync has reported that there is a File IO error. Normally")
7563 msg.append("this means your disk is full, but can be caused by corruption")
7564 msg.append("on the filesystem that contains PORTDIR. Please investigate")
7565 msg.append("and try again after the problem has been fixed.")
7566 msg.append("PORTDIR=" + settings["PORTDIR"])
7568 msg.append("Rsync was killed before it finished.")
7570 msg.append("Rsync has not successfully finished. It is recommended that you keep")
7571 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
7572 msg.append("to use rsync due to firewall or other restrictions. This should be a")
7573 msg.append("temporary problem unless complications exist with your network")
7574 msg.append("(and possibly your system's filesystem) configuration.")
7578 elif syncuri[:6]=="cvs://":
7579 if not os.path.exists("/usr/bin/cvs"):
7580 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
7581 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
7584 cvsdir=os.path.dirname(myportdir)
7585 if not os.path.exists(myportdir+"/CVS"):
7587 print ">>> Starting initial cvs checkout with "+syncuri+"..."
7588 if os.path.exists(cvsdir+"/gentoo-x86"):
7589 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
7594 if e.errno != errno.ENOENT:
7596 "!!! existing '%s' directory; exiting.\n" % myportdir)
7599 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
7600 print "!!! cvs checkout error; exiting."
7602 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
7605 print ">>> Starting cvs update with "+syncuri+"..."
7606 retval = portage.process.spawn_bash(
7607 "cd %s; cvs -z0 -q update -dP" % \
7608 (portage._shell_quote(myportdir),), **spawn_kwargs)
7609 if retval != os.EX_OK:
7613 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
7614 noiselevel=-1, level=logging.ERROR)
7617 if updatecache_flg and \
7618 myaction != "metadata" and \
7619 "metadata-transfer" not in settings.features:
7620 updatecache_flg = False
7622 # Reload the whole config from scratch.
7623 settings, trees, mtimedb = load_emerge_config(trees=trees)
7624 root_config = trees[settings["ROOT"]]["root_config"]
7625 portdb = trees[settings["ROOT"]]["porttree"].dbapi
7627 if updatecache_flg and \
7628 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
7630 # Only update cache for myportdir since that's
7631 # the only one that's been synced here.
7632 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
7634 if portage._global_updates(trees, mtimedb["updates"]):
7636 # Reload the whole config from scratch.
7637 settings, trees, mtimedb = load_emerge_config(trees=trees)
7638 portdb = trees[settings["ROOT"]]["porttree"].dbapi
7639 root_config = trees[settings["ROOT"]]["root_config"]
7641 mybestpv = portdb.xmatch("bestmatch-visible",
7642 portage.const.PORTAGE_PACKAGE_ATOM)
7643 mypvs = portage.best(
7644 trees[settings["ROOT"]]["vartree"].dbapi.match(
7645 portage.const.PORTAGE_PACKAGE_ATOM))
7647 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
7649 if myaction != "metadata":
7650 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
7651 retval = portage.process.spawn(
7652 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
7653 dosyncuri], env=settings.environ())
7654 if retval != os.EX_OK:
7655 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
7657 if(mybestpv != mypvs) and not "--quiet" in myopts:
7659 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
7660 print red(" * ")+"that you update portage now, before any other packages are updated."
7662 print red(" * ")+"To update portage, run 'emerge portage' now."
7665 display_news_notification(root_config, myopts)
7668 def git_sync_timestamps(settings, portdir):
# Purpose (from the visible docstring text below): git does not preserve file
# mtimes, so after a git sync this walks the metadata cache and restores the
# cached mtimes onto unmodified ebuilds/eclasses in the working tree.
# NOTE(review): this listing is elided (interior lines missing), so control
# flow between the visible lines is partly reconstructed — verify against the
# full source before relying on any structural claim below.
7670 Since git doesn't preserve timestamps, synchronize timestamps between
7671 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
7672 for a given file as long as the file in the working tree is not modified
# Bail out early when the tree ships no pregenerated metadata/cache.
7675 cache_dir = os.path.join(portdir, "metadata", "cache")
7676 if not os.path.isdir(cache_dir):
7678 writemsg_level(">>> Synchronizing timestamps...\n")
7680 from portage.cache.cache_errors import CacheError
# Open the pregenerated cache read-only via the configured metadb module.
7682 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
7683 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
7684 except CacheError, e:
7685 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
7686 level=logging.ERROR, noiselevel=-1)
# Collect the set of eclass names (filename minus the 7-char ".eclass").
7689 ec_dir = os.path.join(portdir, "eclass")
7691 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
7692 if f.endswith(".eclass"))
7694 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
7695 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified (diff-filter=M); those
# must keep their current mtimes rather than the cached ones.
7698 args = [portage.const.BASH_BINARY, "-c",
7699 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
7700 portage._shell_quote(portdir)]
7702 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
7703 modified_files = set(l.rstrip("\n") for l in proc.stdout)
# rval presumably holds proc's exit status — the wait/returncode line is
# elided from this listing; confirm in the full source.
7705 if rval != os.EX_OK:
7708 modified_eclasses = set(ec for ec in ec_names \
7709 if os.path.join("eclass", ec + ".eclass") in modified_files)
# Tracks eclasses whose on-disk mtime has already been set this run, so
# conflicting cache entries can be detected as inconsistent.
7711 updated_ec_mtimes = {}
7713 for cpv in cache_db:
7714 cpv_split = portage.catpkgsplit(cpv)
7715 if cpv_split is None:
7716 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
7717 level=logging.ERROR, noiselevel=-1)
7720 cat, pn, ver, rev = cpv_split
7721 cat, pf = portage.catsplit(cpv)
7722 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip ebuilds with local modifications — their mtimes are authoritative.
7723 if relative_eb_path in modified_files:
7727 cache_entry = cache_db[cpv]
7728 eb_mtime = cache_entry.get("_mtime_")
7729 ec_mtimes = cache_entry.get("_eclasses_")
7731 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
7732 level=logging.ERROR, noiselevel=-1)
7734 except CacheError, e:
7735 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
7736 (cpv, e), level=logging.ERROR, noiselevel=-1)
# Validate the cached mtime fields before touching anything on disk.
7739 if eb_mtime is None:
7740 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
7741 level=logging.ERROR, noiselevel=-1)
7745 eb_mtime = long(eb_mtime)
7747 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
7748 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
7751 if ec_mtimes is None:
7752 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
7753 level=logging.ERROR, noiselevel=-1)
# Entries inheriting a locally-modified eclass are skipped (elided branch).
7756 if modified_eclasses.intersection(ec_mtimes):
7759 missing_eclasses = set(ec_mtimes).difference(ec_names)
7760 if missing_eclasses:
7761 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
7762 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
7766 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat result, yet line 7786 compares it
# to an integer mtime — the `.st_mtime`/long() conversion is presumably on
# an elided line; confirm in the full source.
7768 current_eb_mtime = os.stat(eb_path)
7770 writemsg_level("!!! Missing ebuild: %s\n" % \
7771 (cpv,), level=logging.ERROR, noiselevel=-1)
# Reject entries whose eclass mtimes disagree with an mtime already applied
# for the same eclass earlier in this run.
7774 inconsistent = False
7775 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
7776 updated_mtime = updated_ec_mtimes.get(ec)
7777 if updated_mtime is not None and updated_mtime != ec_mtime:
7778 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
7779 (cpv, ec), level=logging.ERROR, noiselevel=-1)
# Apply the cached mtimes to the ebuild and to each not-yet-updated eclass.
7786 if current_eb_mtime != eb_mtime:
7787 os.utime(eb_path, (eb_mtime, eb_mtime))
7789 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
7790 if ec in updated_ec_mtimes:
7792 ec_path = os.path.join(ec_dir, ec + ".eclass")
7793 current_mtime = long(os.stat(ec_path).st_mtime)
7794 if current_mtime != ec_mtime:
7795 os.utime(ec_path, (ec_mtime, ec_mtime))
7796 updated_ec_mtimes[ec] = ec_mtime
7800 def action_metadata(settings, portdb, myopts, porttrees=None):
# Purpose: transfer pregenerated metadata ("metadata/cache") from each
# portage tree into the local depcache (portdb.auxdb), validating eclass
# data along the way, then prune dead cache entries. Shows a progress bar
# on a tty unless --quiet.
# NOTE(review): this listing is elided (interior lines missing); try/except
# and loop scaffolding between visible lines is partly missing — verify
# structure against the full source.
7801 if porttrees is None:
7802 porttrees = portdb.porttrees
7803 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Cache files should be group-writable; restore old umask on exit (elided).
7804 old_umask = os.umask(0002)
7805 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate directly on a primary root directory — pruning dead
# nodes below could otherwise delete arbitrary files.
7806 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
7807 "/lib", "/opt", "/proc", "/root", "/sbin",
7808 "/sys", "/tmp", "/usr", "/var"]:
7809 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
7810 "ROOT DIRECTORY ON YOUR SYSTEM."
7811 print >> sys.stderr, \
7812 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
7814 if not os.path.exists(cachedir):
7815 os.makedirs(cachedir)
# Keys to copy; UNUSED_0* placeholders are dropped.
7817 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
7818 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle of source cache, destination cache, eclass db and the
# set of cpvs seen (used later to compute dead nodes).
7820 class TreeData(object):
7821 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
7822 def __init__(self, dest_db, eclass_db, path, src_db):
7823 self.dest_db = dest_db
7824 self.eclass_db = eclass_db
7826 self.src_db = src_db
7827 self.valid_nodes = set()
7830 for path in porttrees:
# Prefer an already-instantiated pregen auxdb; otherwise open the tree's
# metadata/cache read-only if it exists.
7831 src_db = portdb._pregen_auxdb.get(path)
7832 if src_db is None and \
7833 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
7834 src_db = portdb.metadbmodule(
7835 path, 'metadata/cache', auxdbkeys, readonly=True)
7837 src_db.ec = portdb._repo_info[path].eclass_db
7838 except AttributeError:
7841 if src_db is not None:
7842 porttrees_data.append(TreeData(portdb.auxdb[path],
7843 portdb._repo_info[path].eclass_db, path, src_db))
# Narrow porttrees to those that actually have a source cache.
7845 porttrees = [tree_data.path for tree_data in porttrees_data]
7847 isatty = sys.stdout.isatty()
7848 quiet = not isatty or '--quiet' in myopts
# Progress-bar wiring; a SIGWINCH handler keeps the bar width in sync with
# the terminal (restored to SIG_DFL at the end, line 8023).
7851 progressBar = portage.output.TermProgressBar()
7852 progressHandler = ProgressHandler()
7853 onProgress = progressHandler.onProgress
7855 progressBar.set(progressHandler.curval, progressHandler.maxval)
7856 progressHandler.display = display
7857 def sigwinch_handler(signum, frame):
7858 lines, progressBar.term_columns = \
7859 portage.output.get_term_size()
7860 signal.signal(signal.SIGWINCH, sigwinch_handler)
7862 # Temporarily override portdb.porttrees so portdb.cp_all()
7863 # will only return the relevant subset.
7864 portdb_porttrees = portdb.porttrees
7865 portdb.porttrees = porttrees
7867 cp_all = portdb.cp_all()
7869 portdb.porttrees = portdb_porttrees
7872 maxval = len(cp_all)
7873 if onProgress is not None:
7874 onProgress(maxval, curval)
7876 from portage.cache.util import quiet_mirroring
7877 from portage import eapi_is_supported, \
7878 _validate_cache_for_unsupported_eapis
7880 # TODO: Display error messages, but do not interfere with the progress bar.
7882 # 1) erase the progress bar
7883 # 2) show the error message
7884 # 3) redraw the progress bar on a new line
7885 noise = quiet_mirroring()
# Main transfer loop: for each category/package and tree, validate the
# source entry and write it into the destination cache when needed.
7888 for tree_data in porttrees_data:
7889 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
7890 tree_data.valid_nodes.add(cpv)
7892 src = tree_data.src_db[cpv]
7894 noise.missing_entry(cpv)
7897 except CacheError, ce:
7898 noise.exception(cpv, ce)
# A leading '-' marks an EAPI recorded as unsupported at generation time.
7902 eapi = src.get('EAPI')
7905 eapi = eapi.lstrip('-')
7906 eapi_supported = eapi_is_supported(eapi)
7907 if not eapi_supported:
7908 if not _validate_cache_for_unsupported_eapis:
7909 noise.misc(cpv, "unable to validate " + \
7910 "cache for EAPI='%s'" % eapi)
7915 dest = tree_data.dest_db[cpv]
7916 except (KeyError, CacheError):
7919 for d in (src, dest):
7920 if d is not None and d.get('EAPI') in ('', '0'):
# Decide whether the existing destination entry is identical and can be kept.
7923 if dest is not None:
7924 if not (dest['_mtime_'] == src['_mtime_'] and \
7925 tree_data.eclass_db.is_eclass_data_valid(
7926 dest['_eclasses_']) and \
7927 set(dest['_eclasses_']) == set(src['_eclasses_'])):
7930 # We don't want to skip the write unless we're really
7931 # sure that the existing cache is identical, so don't
7932 # trust _mtime_ and _eclasses_ alone.
7933 for k in set(chain(src, dest)).difference(
7934 ('_mtime_', '_eclasses_')):
7935 if dest.get(k, '') != src.get(k, ''):
7939 if dest is not None:
7940 # The existing data is valid and identical,
7941 # so there's no need to overwrite it.
# Rebuild the _eclasses_ field from the local eclass db so that local
# eclass paths are recorded.
7945 inherited = src.get('INHERITED', '')
7946 eclasses = src.get('_eclasses_')
7947 except CacheError, ce:
7948 noise.exception(cpv, ce)
7952 if eclasses is not None:
7953 if not tree_data.eclass_db.is_eclass_data_valid(
7955 noise.eclass_stale(cpv)
7957 inherited = eclasses
7959 inherited = inherited.split()
7961 if tree_data.src_db.complete_eclass_entries and \
7963 noise.corruption(cpv, "missing _eclasses_ field")
7967 # Even if _eclasses_ already exists, replace it with data from
7968 # eclass_cache, in order to insert local eclass paths.
7970 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
7972 # INHERITED contains a non-existent eclass.
7973 noise.eclass_stale(cpv)
7976 if eclasses is None:
7977 noise.eclass_stale(cpv)
7979 src['_eclasses_'] = eclasses
7981 src['_eclasses_'] = {}
# Unsupported EAPIs are stored with a restricted entry ('-' prefix keeps
# them recognizable as unvalidated; elided dict literal around 7985-7987).
7983 if not eapi_supported:
7985 'EAPI' : '-' + eapi,
7986 '_mtime_' : src['_mtime_'],
7987 '_eclasses_' : src['_eclasses_'],
7991 tree_data.dest_db[cpv] = src
7992 except CacheError, ce:
7993 noise.exception(cpv, ce)
7997 if onProgress is not None:
7998 onProgress(maxval, curval)
8000 if onProgress is not None:
8001 onProgress(maxval, curval)
# Prune destination-cache entries for cpvs no longer present in any tree.
8003 for tree_data in porttrees_data:
8005 dead_nodes = set(tree_data.dest_db.iterkeys())
8006 except CacheError, e:
8007 writemsg_level("Error listing cache entries for " + \
8008 "'%s': %s, continuing...\n" % (tree_data.path, e),
8009 level=logging.ERROR, noiselevel=-1)
8012 dead_nodes.difference_update(tree_data.valid_nodes)
8013 for cpv in dead_nodes:
8015 del tree_data.dest_db[cpv]
8016 except (KeyError, CacheError):
8020 # make sure the final progress is displayed
8021 progressHandler.display()
8023 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
8028 def action_regen(settings, portdb, max_jobs, max_load):
# Purpose: `emerge --regen` — regenerate metadata cache entries for the
# whole tree using a parallel MetadataRegen job scheduler; returns the
# regen job's exit code.
# NOTE(review): listing is elided (lines 8037-8043 missing) — the
# try/except around os.close and the regen.run() call are not fully
# visible; verify against the full source.
8029 xterm_titles = "notitles" not in settings.features
8030 emergelog(xterm_titles, " === regen")
8031 #regenerate cache entries
8032 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes can't block waiting on input.
8034 os.close(sys.stdin.fileno())
8035 except SystemExit, e:
8036 raise # Needed else can't exit
8041 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
8044 portage.writemsg_stdout("done!\n")
8045 return regen.returncode
8047 def action_config(settings, trees, myopts, myfiles):
# Purpose: `emerge --config <atom>` — resolve a single installed package
# from the given atom (interactively disambiguating with --ask when the
# atom matches several) and run its ebuild "config" phase, followed by
# "clean" on success.
# NOTE(review): listing is elided; several branches (no-match return,
# selection loops) are only partly visible — verify against full source.
8048 if len(myfiles) != 1:
8049 print red("!!! config can only take a single package atom at this time\n")
8051 if not is_valid_package_atom(myfiles[0]):
8052 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
8054 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
8055 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages only (vartree).
8059 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
8060 except portage.exception.AmbiguousPackageName, e:
8061 # Multiple matches thrown from cpv_expand
8064 print "No packages found.\n"
# With --ask, present a numbered menu of the matches and read a selection.
8067 if "--ask" in myopts:
8069 print "Please select a package to configure:"
8073 options.append(str(idx))
8074 print options[-1]+") "+pkg
8077 idx = userquery("Selection?", options)
8080 pkg = pkgs[int(idx)-1]
8082 print "The following packages available:"
8085 print "\nPlease use a specific atom or the --ask option."
8091 if "--ask" in myopts:
8092 if userquery("Ready to configure "+pkg+"?") == "No":
8095 print "Configuring pkg..."
8097 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
# Clone settings so the config phase can't pollute the caller's config.
8098 mysettings = portage.config(clone=settings)
8099 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
8100 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): `== 1` on line 8103 compares a string to an int and is
# always False (contrast `== "1"` on 8100) — looks like a latent bug;
# flagged only, since surrounding lines are elided.
8101 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
8103 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
8104 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
8105 if retval == os.EX_OK:
8106 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
8107 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
8110 def action_info(settings, trees, myopts, myfiles):
# Purpose: `emerge --info` — print a diagnostic report: portage/system
# versions, tool versions (distcc/ccache), versions of key toolchain
# packages, selected (or all, with --verbose) configuration variables,
# and, for any package atoms given on the command line, the per-package
# build-time settings and USE flags, optionally running pkg_info().
# NOTE(review): listing is heavily elided; loop headers and several
# branches are missing between visible lines — verify structure against
# the full source before editing.
8111 print getportageversion(settings["PORTDIR"], settings["ROOT"],
8112 settings.profile_path, settings["CHOST"],
8113 trees[settings["ROOT"]]["vartree"].dbapi)
8115 header_title = "System Settings"
8117 print header_width * "="
8118 print header_title.rjust(int(header_width/2 + len(header_title)/2))
8119 print header_width * "="
8120 print "System uname: "+platform.platform(aliased=1)
# Report the tree's last-sync timestamp from metadata/timestamp.chk.
8122 lastSync = portage.grabfile(os.path.join(
8123 settings["PORTDIR"], "metadata", "timestamp.chk"))
8124 print "Timestamp of tree:",
# Probe distcc/ccache versions via their CLIs (commands is Py2 stdlib).
8130 output=commands.getstatusoutput("distcc --version")
8132 print str(output[1].split("\n",1)[0]),
8133 if "distcc" in settings.features:
8138 output=commands.getstatusoutput("ccache -V")
8140 print str(output[1].split("\n",1)[0]),
8141 if "ccache" in settings.features:
# Show installed versions of key toolchain packages, plus any extras the
# profile lists in profiles/info_pkgs.
8146 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
8147 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
8148 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
8149 myvars = portage.util.unique_array(myvars)
8153 if portage.isvalidatom(x):
8154 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
8155 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
8156 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
8158 for pn, ver, rev in pkg_matches:
8160 pkgs.append(ver + "-" + rev)
8164 pkgs = ", ".join(pkgs)
8165 print "%-20s %s" % (x+":", pkgs)
8167 print "%-20s %s" % (x+":", "[NOT VALID]")
8169 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Choose which config variables to print: everything with --verbose,
# otherwise a curated list plus the profile's info_vars.
8171 if "--verbose" in myopts:
8172 myvars=settings.keys()
8174 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
8175 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
8176 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
8177 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
8179 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
8181 myvars = portage.util.unique_array(myvars)
8182 use_expand = settings.get('USE_EXPAND', '').split()
8184 use_expand_hidden = set(
8185 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
8186 alphabetical_use = '--alphabetical' in myopts
8187 root_config = trees[settings["ROOT"]]['root_config']
8193 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-derived flags split out into their own
# VARNAME="..." groups rather than inline.
8195 use = set(settings["USE"].split())
8196 for varname in use_expand:
8197 flag_prefix = varname.lower() + "_"
8199 if f.startswith(flag_prefix):
8203 print 'USE="%s"' % " ".join(use),
8204 for varname in use_expand:
8205 myval = settings.get(varname)
8207 print '%s="%s"' % (varname, myval),
8210 unset_vars.append(x)
8212 print "Unset: "+", ".join(unset_vars)
# With --debug, dump cvs_id_string for every portage submodule.
8215 if "--debug" in myopts:
8216 for x in dir(portage):
8217 module = getattr(portage, x)
8218 if "cvs_id_string" in dir(module):
8219 print "%s: %s" % (str(x), str(module.cvs_id_string))
8221 # See if we can find any packages installed matching the strings
8222 # passed on the command line
8224 vardb = trees[settings["ROOT"]]["vartree"].dbapi
8225 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8227 mypkgs.extend(vardb.match(x))
8229 # If some packages were found...
8231 # Get our global settings (we only print stuff if it varies from
8232 # the current config)
8233 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
8234 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
8235 auxkeys.append('DEFINED_PHASES')
8237 pkgsettings = portage.config(clone=settings)
8239 # Loop through each package
8240 # Only print settings if they differ from global settings
8241 header_title = "Package Settings"
8242 print header_width * "="
8243 print header_title.rjust(int(header_width/2 + len(header_title)/2))
8244 print header_width * "="
8245 from portage.output import EOutput
8248 # Get all package specific variables
8249 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
8250 pkg = Package(built=True, cpv=cpv,
8251 installed=True, metadata=izip(Package.metadata_keys,
8252 (metadata.get(x, '') for x in Package.metadata_keys)),
8253 root_config=root_config, type_name='installed')
8255 print "\n%s was built with the following:" % \
8256 colorize("INFORM", str(pkg.cpv))
# Recompute the package's effective config to classify its USE flags
# (enabled/disabled, forced/masked, USE_EXPAND groups).
8258 pkgsettings.setcpv(pkg)
8259 forced_flags = set(chain(pkgsettings.useforce,
8260 pkgsettings.usemask))
8261 use = set(pkg.use.enabled)
8262 use.discard(pkgsettings.get('ARCH'))
8263 use_expand_flags = set()
8266 for varname in use_expand:
8267 flag_prefix = varname.lower() + "_"
8269 if f.startswith(flag_prefix):
8270 use_expand_flags.add(f)
8271 use_enabled.setdefault(
8272 varname.upper(), []).append(f[len(flag_prefix):])
8274 for f in pkg.iuse.all:
8275 if f.startswith(flag_prefix):
8276 use_expand_flags.add(f)
8278 use_disabled.setdefault(
8279 varname.upper(), []).append(f[len(flag_prefix):])
8281 var_order = set(use_enabled)
8282 var_order.update(use_disabled)
8283 var_order = sorted(var_order)
8284 var_order.insert(0, 'USE')
8285 use.difference_update(use_expand_flags)
8286 use_enabled['USE'] = list(use)
8287 use_disabled['USE'] = []
8289 for f in pkg.iuse.all:
8290 if f not in use and \
8291 f not in use_expand_flags:
8292 use_disabled['USE'].append(f)
# Print each variable's flags, enabled first, sorted per --alphabetical.
8294 for varname in var_order:
8295 if varname in use_expand_hidden:
8298 for f in use_enabled.get(varname, []):
8299 flags.append(UseFlagDisplay(f, True, f in forced_flags))
8300 for f in use_disabled.get(varname, []):
8301 flags.append(UseFlagDisplay(f, False, f in forced_flags))
8302 if alphabetical_use:
8303 flags.sort(key=UseFlagDisplay.sort_combined)
8305 flags.sort(key=UseFlagDisplay.sort_separated)
8306 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
# Show build-time values of CHOST/CFLAGS/etc. only when they differ from
# the current global settings.
8309 for myvar in mydesiredvars:
8310 if metadata[myvar].split() != settings.get(myvar, '').split():
8311 print "%s=\"%s\"" % (myvar, metadata[myvar])
# Run pkg_info() for the package if its ebuild defines an info phase.
8314 if metadata['DEFINED_PHASES']:
8315 if 'info' not in metadata['DEFINED_PHASES'].split():
8318 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
8319 ebuildpath = vardb.findname(pkg.cpv)
8320 if not ebuildpath or not os.path.exists(ebuildpath):
8321 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# NOTE(review): `== 1` below compares a string to an int (always False);
# same pattern as in action_config — flagged, not changed.
8323 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
8324 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
8325 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
8328 def action_search(root_config, myopts, myfiles, spinner):
# Purpose: `emerge --search` — run each search term through a `search`
# instance configured from the relevant command-line options and print
# the accumulated results.
# NOTE(review): listing is elided (e.g. the no-terms error-exit and the
# per-term failure handling around 8340 are incomplete) — verify against
# the full source.
8330 print "emerge: no search terms provided."
8332 searchinstance = search(root_config,
8333 spinner, "--searchdesc" in myopts,
8334 "--quiet" not in myopts, "--usepkg" in myopts,
8335 "--usepkgonly" in myopts)
8336 for mysearch in myfiles:
8338 searchinstance.execute(mysearch)
# Search terms are treated as regular expressions; report bad patterns.
8339 except re.error, comment:
8340 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
8342 searchinstance.output()
8344 def action_uninstall(settings, trees, ldpath_mtimes,
8345 opts, action, files, spinner):
# Purpose: shared front-end for unmerge-like actions (clean, unmerge,
# prune, depclean, deselect). Converts the positional `files` arguments
# — package atoms and/or absolute file paths owned by packages — into
# valid atoms, then dispatches to unmerge()/action_deselect()/
# action_depclean() based on `action`.
# NOTE(review): listing is elided; some branches (e.g. ambiguous-match
# return, owner-atom assembly) are only partly visible.
8347 # For backward compat, some actions do not require leading '='.
8348 ignore_missing_eq = action in ('clean', 'unmerge')
8349 root = settings['ROOT']
8350 vardb = trees[root]['vartree'].dbapi
8354 # Ensure atoms are valid before calling unmerge().
8355 # For backward compat, leading '=' is not required.
8357 if is_valid_package_atom(x) or \
8358 (ignore_missing_eq and is_valid_package_atom('=' + x)):
# Expand short names (e.g. "foo") against the installed-package db.
8362 portage.dep_expand(x, mydb=vardb, settings=settings))
8363 except portage.exception.AmbiguousPackageName, e:
8364 msg = "The short ebuild name \"" + x + \
8365 "\" is ambiguous. Please specify " + \
8366 "one of the following " + \
8367 "fully-qualified ebuild names instead:"
8368 for line in textwrap.wrap(msg, 70):
8369 writemsg_level("!!! %s\n" % (line,),
8370 level=logging.ERROR, noiselevel=-1)
8372 writemsg_level(" %s\n" % colorize("INFORM", i),
8373 level=logging.ERROR, noiselevel=-1)
8374 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths are resolved to their owning packages below; they must
# live under $ROOT.
8377 elif x.startswith(os.sep):
8378 if not x.startswith(root):
8379 writemsg_level(("!!! '%s' does not start with" + \
8380 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
8382 # Queue these up since it's most efficient to handle
8383 # multiple files in a single iter_owners() call.
8384 lookup_owners.append(x)
8388 msg.append("'%s' is not a valid package atom." % (x,))
8389 msg.append("Please check ebuild(5) for full details.")
8390 writemsg_level("".join("!!! %s\n" % line for line in msg),
8391 level=logging.ERROR, noiselevel=-1)
# Resolve queued paths to owning packages via the vardb owners index.
8396 search_for_multiple = False
8397 if len(lookup_owners) > 1:
8398 search_for_multiple = True
8400 for x in lookup_owners:
8401 if not search_for_multiple and os.path.isdir(x):
8402 search_for_multiple = True
8403 relative_paths.append(x[len(root):])
8406 for pkg, relative_path in \
8407 vardb._owners.iter_owners(relative_paths):
8408 owners.add(pkg.mycpv)
8409 if not search_for_multiple:
# Build slot-qualified atoms for each owner when SLOT is available.
8414 slot = vardb.aux_get(cpv, ['SLOT'])[0]
8416 # portage now masks packages with missing slot, but it's
8417 # possible that one was installed by an older version
8418 atom = portage.cpv_getkey(cpv)
8420 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
8421 valid_atoms.append(portage.dep.Atom(atom))
8423 writemsg_level(("!!! '%s' is not claimed " + \
8424 "by any package.\n") % lookup_owners[0],
8425 level=logging.WARNING, noiselevel=-1)
8427 if files and not valid_atoms:
# Dispatch: clean/unmerge (and --nodeps prune) go straight to unmerge();
# deselect and the remaining actions go to their dedicated handlers.
8430 if action in ('clean', 'unmerge') or \
8431 (action == 'prune' and "--nodeps" in opts):
8432 # When given a list of atoms, unmerge them in the order given.
8433 ordered = action == 'unmerge'
8434 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
8435 valid_atoms, ldpath_mtimes, ordered=ordered)
8437 elif action == 'deselect':
8438 rval = action_deselect(settings, trees, opts, valid_atoms)
8440 rval = action_depclean(settings, trees, ldpath_mtimes,
8441 opts, action, valid_atoms, spinner)
8445 def action_deselect(settings, trees, opts, atoms):
# Purpose: `emerge --deselect` — remove the given atoms (expanded to
# slot-qualified forms for installed matches) from the "world" favorites
# set, prompting unless --pretend/--ask semantics dictate otherwise.
# NOTE(review): listing is elided; the world-set lock/unlock and the
# --ask prompt scaffolding are only partly visible — verify against the
# full source.
8446 root_config = trees[settings['ROOT']]['root_config']
8447 world_set = root_config.sets['world']
# Some world-set backends are read-only; refuse rather than fail later.
8448 if not hasattr(world_set, 'update'):
8449 writemsg_level("World set does not appear to be mutable.\n",
8450 level=logging.ERROR, noiselevel=-1)
8453 vardb = root_config.trees['vartree'].dbapi
8454 expanded_atoms = set(atoms)
8455 from portage.dep import Atom
# Add a slot-qualified atom for every installed match, so slot-specific
# world entries are matched too.
8457 for cpv in vardb.match(atom):
8458 slot, = vardb.aux_get(cpv, ['SLOT'])
8461 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
8463 pretend = '--pretend' in opts
8465 if not pretend and hasattr(world_set, 'lock'):
8469 discard_atoms = set()
# A world atom is discarded when an argument atom intersects it, unless
# the argument is slot-qualified and the world atom is not (keeping the
# broader unslotted entry in that case).
8471 for atom in world_set:
8472 if not isinstance(atom, Atom):
8475 for arg_atom in expanded_atoms:
8476 if arg_atom.intersects(atom) and \
8477 not (arg_atom.slot and not atom.slot):
8478 discard_atoms.add(atom)
8481 for atom in sorted(discard_atoms):
8482 print ">>> Removing %s from \"world\" favorites file..." % \
8483 colorize("INFORM", str(atom))
8486 prompt = "Would you like to remove these " + \
8487 "packages from your world favorites?"
8488 if userquery(prompt) == 'No':
# Rewrite the world set without the discarded atoms.
8491 remaining = set(world_set)
8492 remaining.difference_update(discard_atoms)
8494 world_set.replace(remaining)
8496 print ">>> No matching atoms found in \"world\" favorites file..."
8502 def action_depclean(settings, trees, ldpath_mtimes,
8503 myopts, action, myfiles, spinner):
8504 # Kill packages that aren't explicitly merged or are required as a
8505 # dependency of another package. World file is explicit.
8507 # Global depclean or prune operations are not very safe when there are
8508 # missing dependencies since it's unknown how badly incomplete
8509 # the dependency graph is, and we might accidentally remove packages
8510 # that should have been pulled into the graph. On the other hand, it's
8511 # relatively safe to ignore missing deps when only asked to remove
8512 # specific packages.
8513 allow_missing_deps = len(myfiles) > 0
8516 msg.append("Always study the list of packages to be cleaned for any obvious\n")
8517 msg.append("mistakes. Packages that are part of the world set will always\n")
8518 msg.append("be kept. They can be manually added to this set with\n")
8519 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
8520 msg.append("package.provided (see portage(5)) will be removed by\n")
8521 msg.append("depclean, even if they are part of the world set.\n")
8523 msg.append("As a safety measure, depclean will not remove any packages\n")
8524 msg.append("unless *all* required dependencies have been resolved. As a\n")
8525 msg.append("consequence, it is often necessary to run %s\n" % \
8526 good("`emerge --update"))
8527 msg.append(good("--newuse --deep @system @world`") + \
8528 " prior to depclean.\n")
8530 if action == "depclean" and "--quiet" not in myopts and not myfiles:
8531 portage.writemsg_stdout("\n")
8533 portage.writemsg_stdout(colorize("WARN", " * ") + x)
8535 xterm_titles = "notitles" not in settings.features
8536 myroot = settings["ROOT"]
8537 root_config = trees[myroot]["root_config"]
8538 getSetAtoms = root_config.setconfig.getSetAtoms
8539 vardb = trees[myroot]["vartree"].dbapi
8540 deselect = myopts.get('--deselect') != 'n'
8542 required_set_names = ("system", "world")
8546 for s in required_set_names:
8547 required_sets[s] = InternalPackageSet(
8548 initial_atoms=getSetAtoms(s))
8551 # When removing packages, use a temporary version of world
8552 # which excludes packages that are intended to be eligible for
8554 world_temp_set = required_sets["world"]
8555 system_set = required_sets["system"]
8557 if not system_set or not world_temp_set:
8560 writemsg_level("!!! You have no system list.\n",
8561 level=logging.ERROR, noiselevel=-1)
8563 if not world_temp_set:
8564 writemsg_level("!!! You have no world file.\n",
8565 level=logging.WARNING, noiselevel=-1)
8567 writemsg_level("!!! Proceeding is likely to " + \
8568 "break your installation.\n",
8569 level=logging.WARNING, noiselevel=-1)
8570 if "--pretend" not in myopts:
8571 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
8573 if action == "depclean":
8574 emergelog(xterm_titles, " >>> depclean")
8577 args_set = InternalPackageSet()
8579 args_set.update(myfiles)
8580 matched_packages = False
8583 matched_packages = True
8585 if not matched_packages:
8586 writemsg_level(">>> No packages selected for removal by %s\n" % \
8590 writemsg_level("\nCalculating dependencies ")
8591 resolver_params = create_depgraph_params(myopts, "remove")
8592 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
8593 vardb = resolver.trees[myroot]["vartree"].dbapi
8595 if action == "depclean":
8600 world_temp_set.clear()
8602 # Pull in everything that's installed but not matched
8603 # by an argument atom since we don't want to clean any
8604 # package if something depends on it.
8609 if args_set.findAtomForPackage(pkg) is None:
8610 world_temp_set.add("=" + pkg.cpv)
8612 except portage.exception.InvalidDependString, e:
8613 show_invalid_depstring_notice(pkg,
8614 pkg.metadata["PROVIDE"], str(e))
8616 world_temp_set.add("=" + pkg.cpv)
8619 elif action == "prune":
8622 world_temp_set.clear()
8624 # Pull in everything that's installed since we don't
8625 # to prune a package if something depends on it.
8626 world_temp_set.update(vardb.cp_all())
8630 # Try to prune everything that's slotted.
8631 for cp in vardb.cp_all():
8632 if len(vardb.cp_list(cp)) > 1:
8635 # Remove atoms from world that match installed packages
8636 # that are also matched by argument atoms, but do not remove
8637 # them if they match the highest installed version.
8640 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
8641 if not pkgs_for_cp or pkg not in pkgs_for_cp:
8642 raise AssertionError("package expected in matches: " + \
8643 "cp = %s, cpv = %s matches = %s" % \
8644 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
8646 highest_version = pkgs_for_cp[-1]
8647 if pkg == highest_version:
8648 # pkg is the highest version
8649 world_temp_set.add("=" + pkg.cpv)
8652 if len(pkgs_for_cp) <= 1:
8653 raise AssertionError("more packages expected: " + \
8654 "cp = %s, cpv = %s matches = %s" % \
8655 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
8658 if args_set.findAtomForPackage(pkg) is None:
8659 world_temp_set.add("=" + pkg.cpv)
8661 except portage.exception.InvalidDependString, e:
8662 show_invalid_depstring_notice(pkg,
8663 pkg.metadata["PROVIDE"], str(e))
8665 world_temp_set.add("=" + pkg.cpv)
8669 for s, package_set in required_sets.iteritems():
8670 set_atom = SETPREFIX + s
8671 set_arg = SetArg(arg=set_atom, set=package_set,
8672 root_config=resolver.roots[myroot])
8673 set_args[s] = set_arg
8674 for atom in set_arg.set:
8675 resolver._dep_stack.append(
8676 Dependency(atom=atom, root=myroot, parent=set_arg))
8677 resolver.digraph.add(set_arg, None)
8679 success = resolver._complete_graph()
8680 writemsg_level("\b\b... done!\n")
8682 resolver.display_problems()
# Nested helper (closure over resolver, action, allow_missing_deps from the
# enclosing depclean/prune action): report dependency atoms that remained
# unsatisfied after graph completion.
# NOTE(review): this listing is elided — the return statements and the
# msg/prefix initialization between the visible lines are missing; presumably
# it returns a truthy value when unresolvable deps block the operation (see
# the "if unresolved_deps():" call sites below) — TODO confirm.
8687 def unresolved_deps():
# Collect (atom, parent-cpv) pairs for initially-unsatisfied deps whose
# priority is stronger than UnmergeDepPriority.SOFT.
8689 unresolvable = set()
8690 for dep in resolver._initially_unsatisfied_deps:
8691 if isinstance(dep.parent, Package) and \
8692 (dep.priority > UnmergeDepPriority.SOFT):
8693 unresolvable.add((dep.atom, dep.parent.cpv))
8695 if not unresolvable:
# Unless missing deps are explicitly allowed, emit an explanatory error
# block listing each unresolved atom and the package that pulled it in.
8698 if unresolvable and not allow_missing_deps:
8701 msg.append("Dependencies could not be completely resolved due to")
8702 msg.append("the following required packages not being installed:")
8704 for atom, parent in unresolvable:
8705 msg.append(" %s pulled in by:" % (atom,))
8706 msg.append(" %s" % (parent,))
8708 msg.append("Have you forgotten to run " + \
8709 good("`emerge --update --newuse --deep @system @world`") + " prior")
8710 msg.append(("to %s? It may be necessary to manually " + \
8711 "uninstall packages that no longer") % action)
8712 msg.append("exist in the portage tree since " + \
8713 "it may not be possible to satisfy their")
8714 msg.append("dependencies. Also, be aware of " + \
8715 "the --with-bdeps option that is documented")
8716 msg.append("in " + good("`man emerge`") + ".")
8717 if action == "prune":
8719 msg.append("If you would like to ignore " + \
8720 "dependencies then use %s." % good("--nodeps"))
# Emit the assembled message at ERROR level, one prefixed line at a time.
8721 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
8722 level=logging.ERROR, noiselevel=-1)
8726 if unresolved_deps():
8729 graph = resolver.digraph.copy()
8730 required_pkgs_total = 0
8732 if isinstance(node, Package):
8733 required_pkgs_total += 1
# Nested helper: print the packages that pull in child_node, using the
# closed-over dependency graph.
# NOTE(review): elided listing — the early return for the no-parents case
# (8741-8742) and the parent_strs/msg initialization (8745-8746) are missing.
8735 def show_parents(child_node):
8736 parent_nodes = graph.parent_nodes(child_node)
8737 if not parent_nodes:
8738 # With --prune, the highest version can be pulled in without any
8739 # real parent since all installed packages are pulled in. In that
8740 # case there's nothing to show here.
# Render each parent as its cpv when it is a Package, else its plain str().
8743 for node in parent_nodes:
8744 parent_strs.append(str(getattr(node, "cpv", node)))
8747 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
8748 for parent_str in parent_strs:
8749 msg.append(" %s\n" % (parent_str,))
8751 portage.writemsg_stdout("".join(msg), noiselevel=-1)
8753 def cmp_pkg_cpv(pkg1, pkg2):
8754 """Sort Package instances by cpv."""
# NOTE(review): the return statements are elided from this listing;
# presumably classic cmp() semantics (positive / zero / negative for
# greater / equal / less) since it is fed to cmp_sort_key below — confirm.
8755 if pkg1.cpv > pkg2.cpv:
8757 elif pkg1.cpv == pkg2.cpv:
# Nested helper: build the list of installed packages eligible for removal,
# i.e. those not reachable in the completed dependency graph. Uses the
# closed-over action, vardb, args_set, graph, set_args and myopts.
# NOTE(review): elided listing — pkgs_to_remove initialization, the
# show_parents() calls under the "--verbose" branches, and the writemsg
# heads around 8802-8811 are missing.
8762 def create_cleanlist():
8765 if action == "depclean":
# With arguments, only candidates matched by an argument atom are removable.
8768 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
8771 arg_atom = args_set.findAtomForPackage(pkg)
8772 except portage.exception.InvalidDependString:
8773 # this error has already been displayed by now
8777 if pkg not in graph:
8778 pkgs_to_remove.append(pkg)
8779 elif "--verbose" in myopts:
# Without arguments, every installed package outside the graph is removable.
8783 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
8784 if pkg not in graph:
8785 pkgs_to_remove.append(pkg)
8786 elif "--verbose" in myopts:
8789 elif action == "prune":
8790 # Prune really uses all installed instead of world. It's not
8791 # a real reverse dependency so don't display it as such.
8792 graph.remove(set_args["world"])
8794 for atom in args_set:
8795 for pkg in vardb.match_pkgs(atom):
8796 if pkg not in graph:
8797 pkgs_to_remove.append(pkg)
8798 elif "--verbose" in myopts:
# Tell the user when nothing was selected and how to inspect the reasons.
8801 if not pkgs_to_remove:
8803 ">>> No packages selected for removal by %s\n" % action)
8804 if "--verbose" not in myopts:
8806 ">>> To see reverse dependencies, use %s\n" % \
8808 if action == "prune":
8810 ">>> To ignore dependencies, use %s\n" % \
8813 return pkgs_to_remove
8815 cleanlist = create_cleanlist()
8818 clean_set = set(cleanlist)
8820 # Check if any of these package are the sole providers of libraries
8821 # with consumers that have not been selected for removal. If so, these
8822 # packages and any dependencies need to be added to the graph.
8823 real_vardb = trees[myroot]["vartree"].dbapi
8824 linkmap = real_vardb.linkmap
8825 liblist = linkmap.listLibraryObjects()
8831 writemsg_level(">>> Checking for lib consumers...\n")
8833 for pkg in cleanlist:
8834 pkg_dblink = real_vardb._dblink(pkg.cpv)
8835 provided_libs = set()
8838 if pkg_dblink.isowner(lib, myroot):
8839 provided_libs.add(lib)
8841 if not provided_libs:
8845 for lib in provided_libs:
8846 lib_consumers = consumer_cache.get(lib)
8847 if lib_consumers is None:
8848 lib_consumers = linkmap.findConsumers(lib)
8849 consumer_cache[lib] = lib_consumers
8851 consumers[lib] = lib_consumers
8856 for lib, lib_consumers in consumers.items():
8857 for consumer_file in list(lib_consumers):
8858 if pkg_dblink.isowner(consumer_file, myroot):
8859 lib_consumers.remove(consumer_file)
8860 if not lib_consumers:
8866 for lib, lib_consumers in consumers.iteritems():
8868 soname = soname_cache.get(lib)
8870 soname = linkmap.getSoname(lib)
8871 soname_cache[lib] = soname
8873 consumer_providers = []
8874 for lib_consumer in lib_consumers:
8875 providers = provider_cache.get(lib)
8876 if providers is None:
8877 providers = linkmap.findProviders(lib_consumer)
8878 provider_cache[lib_consumer] = providers
8879 if soname not in providers:
8880 # Why does this happen?
8882 consumer_providers.append(
8883 (lib_consumer, providers[soname]))
8885 consumers[lib] = consumer_providers
8887 consumer_map[pkg] = consumers
8891 search_files = set()
8892 for consumers in consumer_map.itervalues():
8893 for lib, consumer_providers in consumers.iteritems():
8894 for lib_consumer, providers in consumer_providers:
8895 search_files.add(lib_consumer)
8896 search_files.update(providers)
8898 writemsg_level(">>> Assigning files to packages...\n")
8899 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
8901 for pkg, consumers in consumer_map.items():
8902 for lib, consumer_providers in consumers.items():
8903 lib_consumers = set()
8905 for lib_consumer, providers in consumer_providers:
8906 owner_set = file_owners.get(lib_consumer)
8907 provider_dblinks = set()
8908 provider_pkgs = set()
8910 if len(providers) > 1:
8911 for provider in providers:
8912 provider_set = file_owners.get(provider)
8913 if provider_set is not None:
8914 provider_dblinks.update(provider_set)
8916 if len(provider_dblinks) > 1:
8917 for provider_dblink in provider_dblinks:
8918 pkg_key = ("installed", myroot,
8919 provider_dblink.mycpv, "nomerge")
8920 if pkg_key not in clean_set:
8921 provider_pkgs.add(vardb.get(pkg_key))
8926 if owner_set is not None:
8927 lib_consumers.update(owner_set)
8929 for consumer_dblink in list(lib_consumers):
8930 if ("installed", myroot, consumer_dblink.mycpv,
8931 "nomerge") in clean_set:
8932 lib_consumers.remove(consumer_dblink)
8936 consumers[lib] = lib_consumers
8940 del consumer_map[pkg]
8943 # TODO: Implement a package set for rebuilding consumer packages.
8945 msg = "In order to avoid breakage of link level " + \
8946 "dependencies, one or more packages will not be removed. " + \
8947 "This can be solved by rebuilding " + \
8948 "the packages that pulled them in."
8951 from textwrap import wrap
8952 writemsg_level("".join(prefix + "%s\n" % line for \
8953 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
8956 for pkg, consumers in consumer_map.iteritems():
8957 unique_consumers = set(chain(*consumers.values()))
8958 unique_consumers = sorted(consumer.mycpv \
8959 for consumer in unique_consumers)
8961 msg.append(" %s pulled in by:" % (pkg.cpv,))
8962 for consumer in unique_consumers:
8963 msg.append(" %s" % (consumer,))
8965 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
8966 level=logging.WARNING, noiselevel=-1)
8968 # Add lib providers to the graph as children of lib consumers,
8969 # and also add any dependencies pulled in by the provider.
8970 writemsg_level(">>> Adding lib providers to graph...\n")
8972 for pkg, consumers in consumer_map.iteritems():
8973 for consumer_dblink in set(chain(*consumers.values())):
8974 consumer_pkg = vardb.get(("installed", myroot,
8975 consumer_dblink.mycpv, "nomerge"))
8976 if not resolver._add_pkg(pkg,
8977 Dependency(parent=consumer_pkg,
8978 priority=UnmergeDepPriority(runtime=True),
8980 resolver.display_problems()
8983 writemsg_level("\nCalculating dependencies ")
8984 success = resolver._complete_graph()
8985 writemsg_level("\b\b... done!\n")
8986 resolver.display_problems()
8989 if unresolved_deps():
8992 graph = resolver.digraph.copy()
8993 required_pkgs_total = 0
8995 if isinstance(node, Package):
8996 required_pkgs_total += 1
8997 cleanlist = create_cleanlist()
9000 clean_set = set(cleanlist)
9002 # Use a topological sort to create an unmerge order such that
9003 # each package is unmerged before it's dependencies. This is
9004 # necessary to avoid breaking things that may need to run
9005 # during pkg_prerm or pkg_postrm phases.
9007 # Create a new graph to account for dependencies between the
9008 # packages being unmerged.
9012 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
9013 runtime = UnmergeDepPriority(runtime=True)
9014 runtime_post = UnmergeDepPriority(runtime_post=True)
9015 buildtime = UnmergeDepPriority(buildtime=True)
9018 "PDEPEND": runtime_post,
9019 "DEPEND": buildtime,
9022 for node in clean_set:
9023 graph.add(node, None)
9025 node_use = node.metadata["USE"].split()
9026 for dep_type in dep_keys:
9027 depstr = node.metadata[dep_type]
9031 portage.dep._dep_check_strict = False
9032 success, atoms = portage.dep_check(depstr, None, settings,
9033 myuse=node_use, trees=resolver._graph_trees,
9036 portage.dep._dep_check_strict = True
9038 # Ignore invalid deps of packages that will
9039 # be uninstalled anyway.
9042 priority = priority_map[dep_type]
9044 if not isinstance(atom, portage.dep.Atom):
9045 # Ignore invalid atoms returned from dep_check().
9049 matches = vardb.match_pkgs(atom)
9052 for child_node in matches:
9053 if child_node in clean_set:
9054 graph.add(child_node, node, priority=priority)
9057 if len(graph.order) == len(graph.root_nodes()):
9058 # If there are no dependencies between packages
9059 # let unmerge() group them by cat/pn.
9061 cleanlist = [pkg.cpv for pkg in graph.order]
9063 # Order nodes from lowest to highest overall reference count for
9064 # optimal root node selection.
9066 for node in graph.order:
9067 node_refcounts[node] = len(graph.parent_nodes(node))
def cmp_reference_count(node1, node2):
    """Order graph nodes by their cached parent-reference counts (ascending)."""
    count1 = node_refcounts[node1]
    count2 = node_refcounts[node2]
    return count1 - count2
9070 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
9072 ignore_priority_range = [None]
9073 ignore_priority_range.extend(
9074 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
9075 while not graph.empty():
9076 for ignore_priority in ignore_priority_range:
9077 nodes = graph.root_nodes(ignore_priority=ignore_priority)
9081 raise AssertionError("no root nodes")
9082 if ignore_priority is not None:
9083 # Some deps have been dropped due to circular dependencies,
9084 # so only pop one node in order do minimize the number that
9089 cleanlist.append(node.cpv)
9091 unmerge(root_config, myopts, "unmerge", cleanlist,
9092 ldpath_mtimes, ordered=ordered)
9094 if action == "prune":
9097 if not cleanlist and "--quiet" in myopts:
9100 print "Packages installed: "+str(len(vardb.cpv_all()))
9101 print "Packages in world: " + \
9102 str(len(root_config.sets["world"].getAtoms()))
9103 print "Packages in system: " + \
9104 str(len(root_config.sets["system"].getAtoms()))
9105 print "Required packages: "+str(required_pkgs_total)
9106 if "--pretend" in myopts:
9107 print "Number to remove: "+str(len(cleanlist))
9109 print "Number removed: "+str(len(cleanlist))
# Rebuild a depgraph from a saved --resume mergelist, dropping tasks whose
# dependencies can no longer be satisfied.
# NOTE(review): elided listing — the docstring quotes, the retry loop head,
# and several branch bodies between the visible lines are missing.
9111 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
9113 Construct a depgraph for the given resume list. This will raise
9114 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
9116 @returns: (success, depgraph, dropped_tasks)
9119 skip_unsatisfied = True
9120 mergelist = mtimedb["resume"]["mergelist"]
9121 dropped_tasks = set()
9123 mydepgraph = depgraph(settings, trees,
9124 myopts, myparams, spinner)
9126 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
9127 skip_masked=skip_masked)
9128 except depgraph.UnsatisfiedResumeDep, e:
9129 if not skip_unsatisfied:
# Walk up from each unsatisfied dep's parent, collecting every ancestor
# whose own dependencies would become unsatisfied once that parent is
# dropped from the graph.
9132 graph = mydepgraph.digraph
9133 unsatisfied_parents = dict((dep.parent, dep.parent) \
9135 traversed_nodes = set()
9136 unsatisfied_stack = list(unsatisfied_parents)
9137 while unsatisfied_stack:
9138 pkg = unsatisfied_stack.pop()
9139 if pkg in traversed_nodes:
9141 traversed_nodes.add(pkg)
9143 # If this package was pulled in by a parent
9144 # package scheduled for merge, removing this
9145 # package may cause the parent package's
9146 # dependency to become unsatisfied.
9147 for parent_node in graph.parent_nodes(pkg):
9148 if not isinstance(parent_node, Package) \
9149 or parent_node.operation not in ("merge", "nomerge"):
9152 graph.child_nodes(parent_node,
9153 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
9154 if pkg in unsatisfied:
9155 unsatisfied_parents[parent_node] = parent_node
9156 unsatisfied_stack.append(parent_node)
# Prune every unsatisfied parent from the persisted mergelist.
9158 pruned_mergelist = []
9160 if isinstance(x, list) and \
9161 tuple(x) not in unsatisfied_parents:
9162 pruned_mergelist.append(x)
9164 # If the mergelist doesn't shrink then this loop is infinite.
9165 if len(pruned_mergelist) == len(mergelist):
9166 # This happens if a package can't be dropped because
9167 # it's already installed, but it has unsatisfied PDEPEND.
9169 mergelist[:] = pruned_mergelist
9171 # Exclude installed packages that have been removed from the graph due
9172 # to failure to build/install runtime dependencies after the dependent
9173 # package has already been installed.
9174 dropped_tasks.update(pkg for pkg in \
9175 unsatisfied_parents if pkg.operation != "nomerge")
9176 mydepgraph.break_refs(unsatisfied_parents)
# Release the temporary traversal state before retrying/returning.
9178 del e, graph, traversed_nodes, \
9179 unsatisfied_parents, unsatisfied_stack
9183 return (success, mydepgraph, dropped_tasks)
# Top-level handler for emerge's build/merge actions: validates any saved
# resume state, builds (or resumes) a depgraph, displays/prompts as the
# options require, then hands the merge list to the Scheduler.
# NOTE(review): this listing is elided throughout — loop heads, else
# branches, return statements and several assignments between the visible
# lines are missing.
9185 def action_build(settings, trees, mtimedb,
9186 myopts, myaction, myfiles, spinner):
9188 # validate the state of the resume data
9189 # so that we can make assumptions later.
9190 for k in ("resume", "resume_backup"):
9191 if k not in mtimedb:
9193 resume_data = mtimedb[k]
9194 if not isinstance(resume_data, dict):
9197 mergelist = resume_data.get("mergelist")
9198 if not isinstance(mergelist, list):
# Each mergelist task must be a 4-element [type, root, key, action] list.
9202 if not (isinstance(x, list) and len(x) == 4):
9204 pkg_type, pkg_root, pkg_key, pkg_action = x
9205 if pkg_root not in trees:
9206 # Current $ROOT setting differs,
9207 # so the list must be stale.
9213 resume_opts = resume_data.get("myopts")
9214 if not isinstance(resume_opts, (dict, list)):
9217 favorites = resume_data.get("favorites")
9218 if not isinstance(favorites, list):
# Merge the options saved with the resumed command into the current ones,
# falling back from "resume" to "resume_backup" when needed.
9223 if "--resume" in myopts and \
9224 ("resume" in mtimedb or
9225 "resume_backup" in mtimedb):
9227 if "resume" not in mtimedb:
9228 mtimedb["resume"] = mtimedb["resume_backup"]
9229 del mtimedb["resume_backup"]
9231 # "myopts" is a list for backward compatibility.
9232 resume_opts = mtimedb["resume"].get("myopts", [])
9233 if isinstance(resume_opts, list):
9234 resume_opts = dict((k,True) for k in resume_opts)
# These options are per-invocation and must not be resurrected on resume.
9235 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
9236 resume_opts.pop(opt, None)
9238 # Current options always override resume_opts.
9239 resume_opts.update(myopts)
9241 myopts.update(resume_opts)
9243 if "--debug" in myopts:
9244 writemsg_level("myopts %s\n" % (myopts,))
9246 # Adjust config according to options of the command being resumed.
9247 for myroot in trees:
9248 mysettings = trees[myroot]["vartree"].settings
9250 adjust_config(myopts, mysettings)
9252 del myroot, mysettings
9254 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as locals.
9257 buildpkgonly = "--buildpkgonly" in myopts
9258 pretend = "--pretend" in myopts
9259 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
9260 ask = "--ask" in myopts
9261 nodeps = "--nodeps" in myopts
9262 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
9263 tree = "--tree" in myopts
9266 del myopts["--tree"]
9267 portage.writemsg(colorize("WARN", " * ") + \
9268 "--tree is broken with --nodeps. Disabling...\n")
9269 debug = "--debug" in myopts
9270 verbose = "--verbose" in myopts
9271 quiet = "--quiet" in myopts
9272 if pretend or fetchonly:
9273 # make the mtimedb readonly
9274 mtimedb.filename = None
# Warn that auto-digest regeneration can mask fetch corruption.
9275 if '--digest' in myopts or 'digest' in settings.features:
9276 if '--digest' in myopts:
9277 msg = "The --digest option"
9279 msg = "The FEATURES=digest setting"
9281 msg += " can prevent corruption from being" + \
9282 " noticed. The `repoman manifest` command is the preferred" + \
9283 " way to generate manifests and it is capable of doing an" + \
9284 " entire repository or category at once."
9286 writemsg(prefix + "\n")
9287 from textwrap import wrap
9288 for line in wrap(msg, 72):
9289 writemsg("%s%s\n" % (prefix, line))
9290 writemsg(prefix + "\n")
# Announce what kind of listing is about to be displayed.
9292 if "--quiet" not in myopts and \
9293 ("--pretend" in myopts or "--ask" in myopts or \
9294 "--tree" in myopts or "--verbose" in myopts):
9296 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
9298 elif "--buildpkgonly" in myopts:
9302 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
9304 print darkgreen("These are the packages that would be %s, in reverse order:") % action
9308 print darkgreen("These are the packages that would be %s, in order:") % action
9311 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
9312 if not show_spinner:
9313 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved mergelist.
9316 favorites = mtimedb["resume"].get("favorites")
9317 if not isinstance(favorites, list):
9321 print "Calculating dependencies ",
9322 myparams = create_depgraph_params(myopts, myaction)
9324 resume_data = mtimedb["resume"]
9325 mergelist = resume_data["mergelist"]
9326 if mergelist and "--skipfirst" in myopts:
9327 for i, task in enumerate(mergelist):
9328 if isinstance(task, list) and \
9329 task and task[-1] == "merge":
9336 success, mydepgraph, dropped_tasks = resume_depgraph(
9337 settings, trees, mtimedb, myopts, myparams, spinner)
9338 except (portage.exception.PackageNotFound,
9339 depgraph.UnsatisfiedResumeDep), e:
9340 if isinstance(e, depgraph.UnsatisfiedResumeDep):
9341 mydepgraph = e.depgraph
# Resume failed: explain why the saved list is unusable.
9344 from textwrap import wrap
9345 from portage.output import EOutput
9348 resume_data = mtimedb["resume"]
9349 mergelist = resume_data.get("mergelist")
9350 if not isinstance(mergelist, list):
9352 if mergelist and debug or (verbose and not quiet):
9353 out.eerror("Invalid resume list:")
9356 for task in mergelist:
9357 if isinstance(task, list):
9358 out.eerror(indent + str(tuple(task)))
9361 if isinstance(e, depgraph.UnsatisfiedResumeDep):
9362 out.eerror("One or more packages are either masked or " + \
9363 "have missing dependencies:")
9367 if dep.atom is None:
9368 out.eerror(indent + "Masked package:")
9369 out.eerror(2 * indent + str(dep.parent))
9372 out.eerror(indent + str(dep.atom) + " pulled in by:")
9373 out.eerror(2 * indent + str(dep.parent))
9375 msg = "The resume list contains packages " + \
9376 "that are either masked or have " + \
9377 "unsatisfied dependencies. " + \
9378 "Please restart/continue " + \
9379 "the operation manually, or use --skipfirst " + \
9380 "to skip the first package in the list and " + \
9381 "any other packages that may be " + \
9382 "masked or have missing dependencies."
9383 for line in wrap(msg, 72):
9385 elif isinstance(e, portage.exception.PackageNotFound):
9386 out.eerror("An expected package is " + \
9387 "not available: %s" % str(e))
9389 msg = "The resume list contains one or more " + \
9390 "packages that are no longer " + \
9391 "available. Please restart/continue " + \
9392 "the operation manually."
9393 for line in wrap(msg, 72):
9397 print "\b\b... done!"
# Report tasks dropped from the resume list, then discard stale state
# unless the user will be prompted anyway.
9401 portage.writemsg("!!! One or more packages have been " + \
9402 "dropped due to\n" + \
9403 "!!! masking or unsatisfied dependencies:\n\n",
9405 for task in dropped_tasks:
9406 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
9407 portage.writemsg("\n", noiselevel=-1)
9410 if mydepgraph is not None:
9411 mydepgraph.display_problems()
9412 if not (ask or pretend):
9413 # delete the current list and also the backup
9414 # since it's probably stale too.
9415 for k in ("resume", "resume_backup"):
9416 mtimedb.pop(k, None)
9421 if ("--resume" in myopts):
9422 print darkgreen("emerge: It seems we have nothing to resume...")
# Fresh (non-resume) path: build a depgraph from the command-line targets.
9425 myparams = create_depgraph_params(myopts, myaction)
9426 if "--quiet" not in myopts and "--nodeps" not in myopts:
9427 print "Calculating dependencies ",
9429 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
9431 retval, favorites = mydepgraph.select_files(myfiles)
9432 except portage.exception.PackageNotFound, e:
9433 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
9435 except portage.exception.PackageSetNotFound, e:
9436 root_config = trees[settings["ROOT"]]["root_config"]
9437 display_missing_pkg_set(root_config, e.value)
9440 print "\b\b... done!"
9442 mydepgraph.display_problems()
# Interactive display + confirmation prompt (--ask/--tree/--verbose).
9445 if "--pretend" not in myopts and \
9446 ("--ask" in myopts or "--tree" in myopts or \
9447 "--verbose" in myopts) and \
9448 not ("--quiet" in myopts and "--ask" not in myopts):
9449 if "--resume" in myopts:
9450 mymergelist = mydepgraph.altlist()
9451 if len(mymergelist) == 0:
9452 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
9454 favorites = mtimedb["resume"]["favorites"]
9455 retval = mydepgraph.display(
9456 mydepgraph.altlist(reversed=tree),
9457 favorites=favorites)
9458 mydepgraph.display_problems()
9459 if retval != os.EX_OK:
9461 prompt="Would you like to resume merging these packages?"
9463 retval = mydepgraph.display(
9464 mydepgraph.altlist(reversed=("--tree" in myopts)),
9465 favorites=favorites)
9466 mydepgraph.display_problems()
9467 if retval != os.EX_OK:
9470 for x in mydepgraph.altlist():
9471 if isinstance(x, Package) and x.operation == "merge":
9475 sets = trees[settings["ROOT"]]["root_config"].sets
9476 world_candidates = None
9477 if "--noreplace" in myopts and \
9478 not oneshot and favorites:
9479 # Sets that are not world candidates are filtered
9480 # out here since the favorites list needs to be
9481 # complete for depgraph.loadResumeCommand() to
9482 # operate correctly.
9483 world_candidates = [x for x in favorites \
9484 if not (x.startswith(SETPREFIX) and \
9485 not sets[x[1:]].world_candidate)]
9486 if "--noreplace" in myopts and \
9487 not oneshot and world_candidates:
9489 for x in world_candidates:
9490 print " %s %s" % (good("*"), x)
9491 prompt="Would you like to add these packages to your world favorites?"
9492 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
9493 prompt="Nothing to merge; would you like to auto-clean packages?"
9496 print "Nothing to merge; quitting."
9499 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
9500 prompt="Would you like to fetch the source files for these packages?"
9502 prompt="Would you like to merge these packages?"
9504 if "--ask" in myopts and userquery(prompt) == "No":
9509 # Don't ask again (e.g. when auto-cleaning packages after merge)
9510 myopts.pop("--ask", None)
# Pretend-only display (no confirmation, no merging).
9512 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
9513 if ("--resume" in myopts):
9514 mymergelist = mydepgraph.altlist()
9515 if len(mymergelist) == 0:
9516 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
9518 favorites = mtimedb["resume"]["favorites"]
9519 retval = mydepgraph.display(
9520 mydepgraph.altlist(reversed=tree),
9521 favorites=favorites)
9522 mydepgraph.display_problems()
9523 if retval != os.EX_OK:
9526 retval = mydepgraph.display(
9527 mydepgraph.altlist(reversed=("--tree" in myopts)),
9528 favorites=favorites)
9529 mydepgraph.display_problems()
9530 if retval != os.EX_OK:
# --buildpkgonly sanity check in the pretend branch.
9532 if "--buildpkgonly" in myopts:
9533 graph_copy = mydepgraph.digraph.clone()
9534 removed_nodes = set()
9535 for node in graph_copy:
9536 if not isinstance(node, Package) or \
9537 node.operation == "nomerge":
9538 removed_nodes.add(node)
9539 graph_copy.difference_update(removed_nodes)
9540 if not graph_copy.hasallzeros(ignore_priority = \
9541 DepPrioritySatisfiedRange.ignore_medium):
9542 print "\n!!! --buildpkgonly requires all dependencies to be merged."
9543 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same --buildpkgonly check on the real-merge path.
9546 if "--buildpkgonly" in myopts:
9547 graph_copy = mydepgraph.digraph.clone()
9548 removed_nodes = set()
9549 for node in graph_copy:
9550 if not isinstance(node, Package) or \
9551 node.operation == "nomerge":
9552 removed_nodes.add(node)
9553 graph_copy.difference_update(removed_nodes)
9554 if not graph_copy.hasallzeros(ignore_priority = \
9555 DepPrioritySatisfiedRange.ignore_medium):
9556 print "\n!!! --buildpkgonly requires all dependencies to be merged."
9557 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Hand the final merge list to the Scheduler (resume vs. fresh variants);
# mydepgraph is deleted first so the Scheduler owns the only references.
9560 if ("--resume" in myopts):
9561 favorites=mtimedb["resume"]["favorites"]
9562 mymergelist = mydepgraph.altlist()
9563 mydepgraph.break_refs(mymergelist)
9564 mergetask = Scheduler(settings, trees, mtimedb, myopts,
9565 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
9566 del mydepgraph, mymergelist
9569 retval = mergetask.merge()
9570 merge_count = mergetask.curval
# Persist resume state: keep a backup of a multi-task list, then store
# the current options and favorites for a future --resume.
9572 if "resume" in mtimedb and \
9573 "mergelist" in mtimedb["resume"] and \
9574 len(mtimedb["resume"]["mergelist"]) > 1:
9575 mtimedb["resume_backup"] = mtimedb["resume"]
9576 del mtimedb["resume"]
9578 mtimedb["resume"]={}
9579 # Stored as a dict starting with portage-2.1.6_rc1, and supported
9580 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
9581 # a list type for options.
9582 mtimedb["resume"]["myopts"] = myopts.copy()
9584 # Convert Atom instances to plain str.
9585 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
9587 pkglist = mydepgraph.altlist()
9588 mydepgraph.saveNomergeFavorites()
9589 mydepgraph.break_refs(pkglist)
9590 mergetask = Scheduler(settings, trees, mtimedb, myopts,
9591 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
9592 del mydepgraph, pkglist
9595 retval = mergetask.merge()
9596 merge_count = mergetask.curval
# After a successful real merge, optionally auto-clean and prune the
# preserved-libs registry.
9598 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
9599 if "yes" == settings.get("AUTOCLEAN"):
9600 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
9601 unmerge(trees[settings["ROOT"]]["root_config"],
9602 myopts, "clean", [],
9603 ldpath_mtimes, autoclean=1)
9605 portage.writemsg_stdout(colorize("WARN", "WARNING:")
9606 + " AUTOCLEAN is disabled. This can cause serious"
9607 + " problems due to overlapping packages.\n")
9608 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive emerge actions were requested.
# NOTE(review): the tail of this function (original line 9615, presumably a
# sys.exit call — see its use from parse_opts) is elided from this listing.
9612 def multiple_actions(action1, action2):
9613 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
9614 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
9617 def insert_optional_args(args):
# NOTE(review): elided listing — the docstring quotes, new_args/arg_stack
# initialization, the while-loop head, and several else branches are missing.
9619 Parse optional arguments and insert a value if one has
9620 not been provided. This is done before feeding the args
9621 to the optparse parser since that parser does not support
9622 this feature natively.
# Options that accept an optional value, mapped to the values that may
# legitimately follow them on the command line.
9626 jobs_opts = ("-j", "--jobs")
9627 default_arg_opts = {
9628 '--deselect' : ('n',),
9629 '--root-deps' : ('rdeps',),
9634 arg = arg_stack.pop()
9636 default_arg_choices = default_arg_opts.get(arg)
9637 if default_arg_choices is not None:
9638 new_args.append(arg)
9639 if arg_stack and arg_stack[-1] in default_arg_choices:
9640 new_args.append(arg_stack.pop())
9642 # insert default argument
9643 new_args.append('True')
# Handle -j / --jobs, including the bundled short form like "-j4" or "-aj".
9646 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
9647 if not (short_job_opt or arg in jobs_opts):
9648 new_args.append(arg)
9651 # Insert an empty placeholder in order to
9652 # satisfy the requirements of optparse.
9654 new_args.append("--jobs")
9657 if short_job_opt and len(arg) > 2:
9660 job_count = int(arg[2:])
9662 saved_opts = arg[2:]
9665 saved_opts = arg[1:].replace("j", "")
9667 if job_count is None and arg_stack:
9669 job_count = int(arg_stack[-1])
9673 # Discard the job count from the stack
9674 # since we're consuming it here.
9677 if job_count is None:
9678 # unlimited number of jobs
9679 new_args.append("True")
9681 new_args.append(str(job_count))
# Re-queue any short options that were bundled with -j (e.g. "-aj").
9683 if saved_opts is not None:
9684 new_args.append("-" + saved_opts)
# Parse the emerge command line into (action, options-dict, target files),
# using optparse plus the insert_optional_args() preprocessing above.
# NOTE(review): elided listing — option-name keys of argument_options,
# several try/except bodies around int()/float() conversion, and the
# myopts/myfiles initialization are missing.
9688 def parse_opts(tmpcmdline, silent=False):
9693 global actions, options, shortmapping
9695 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
9696 argument_options = {
9698 "help":"specify the location for portage configuration files",
9702 "help":"enable or disable color output",
9704 "choices":("y", "n")
9708 "help" : "remove atoms from the world file",
9710 "choices" : ("True", "n")
9715 "help" : "Specifies the number of packages to build " + \
9723 "help" :"Specifies that no new builds should be started " + \
9724 "if there are other builds running and the load average " + \
9725 "is at least LOAD (a floating-point number).",
9731 "help":"include unnecessary build time dependencies",
9733 "choices":("y", "n")
9736 "help":"specify conditions to trigger package reinstallation",
9738 "choices":["changed-use"]
9741 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" below is a typo ("dependencies") in a
# user-visible help string; left as-is here since this edit only adds
# comments — worth fixing in a behavioral change.
9746 "help" : "modify interpretation of depedencies",
9748 "choices" :("True", "rdeps")
# Build the parser: emerge supplies its own --help action, so drop
# optparse's built-in one.
9752 from optparse import OptionParser
9753 parser = OptionParser()
9754 if parser.has_option("--help"):
9755 parser.remove_option("--help")
9757 for action_opt in actions:
9758 parser.add_option("--" + action_opt, action="store_true",
9759 dest=action_opt.replace("-", "_"), default=False)
9760 for myopt in options:
9761 parser.add_option(myopt, action="store_true",
9762 dest=myopt.lstrip("--").replace("-", "_"), default=False)
9763 for shortopt, longopt in shortmapping.iteritems():
9764 parser.add_option("-" + shortopt, action="store_true",
9765 dest=longopt.lstrip("--").replace("-", "_"), default=False)
9766 for myalias, myopt in longopt_aliases.iteritems():
9767 parser.add_option(myalias, action="store_true",
9768 dest=myopt.lstrip("--").replace("-", "_"), default=False)
9770 for myopt, kwargs in argument_options.iteritems():
9771 parser.add_option(myopt,
9772 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
9774 tmpcmdline = insert_optional_args(tmpcmdline)
9776 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Normalize the "True" placeholder injected by insert_optional_args().
9778 if myoptions.deselect == "True":
9779 myoptions.deselect = True
9781 if myoptions.root_deps == "True":
9782 myoptions.root_deps = True
# Validate --jobs: either the unlimited sentinel True or a positive int.
9786 if myoptions.jobs == "True":
9790 jobs = int(myoptions.jobs)
9794 if jobs is not True and \
9798 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
9799 (myoptions.jobs,), noiselevel=-1)
9801 myoptions.jobs = jobs
# Validate --load-average: must convert to a positive float.
9803 if myoptions.load_average:
9805 load_average = float(myoptions.load_average)
9809 if load_average <= 0.0:
9812 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
9813 (myoptions.load_average,), noiselevel=-1)
9815 myoptions.load_average = load_average
# Copy parsed values into the myopts dict keyed by option string.
9817 for myopt in options:
9818 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
9820 myopts[myopt] = True
9822 for myopt in argument_options:
9823 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
9827 if myoptions.searchdesc:
9828 myoptions.search = True
# Exactly one action may be selected; bail out on conflicts.
9830 for action_opt in actions:
9831 v = getattr(myoptions, action_opt.replace("-", "_"))
9834 multiple_actions(myaction, action_opt)
9836 myaction = action_opt
9838 if myaction is None and myoptions.deselect is True:
9839 myaction = 'deselect'
9843 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	# Sanity-check the ebuild environment for every configured root.
	# NOTE(review): only the settings lookup is visible in this excerpt;
	# presumably a validation call on `settings` follows — confirm
	# against the complete source.
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings
def clear_caches(trees):
	"""Drop memoized metadata caches for every root in trees, then
	flush portage's global directory-listing cache."""
	for root_trees in trees.itervalues():
		port_dbapi = root_trees["porttree"].dbapi
		bin_dbapi = root_trees["bintree"].dbapi
		# melt() undoes a previous freeze (memoized xmatch results).
		port_dbapi.melt()
		port_dbapi._aux_cache.clear()
		bin_dbapi._aux_cache.clear()
		bin_dbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Build and return (settings, trees, mtimedb) for this environment.

	Reads PORTAGE_CONFIGROOT and ROOT overrides from os.environ, creates
	(or reuses) the portage tree structures, and attaches a RootConfig
	to each root's tree dict.
	"""
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
	# NOTE(review): `kwargs` is presumably populated from the environment
	# overrides gathered above; the assignment is not visible here.
	trees = portage.create_trees(trees=trees, **kwargs)

	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings
	# NOTE(review): this loop overwrites `settings` per root; any guard
	# condition selecting a particular root is not visible here.
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings

	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config.

	Mutates `settings` in place; each change is registered via
	settings.backup_changes() so it persists across config regeneration.
	"""

	# To enhance usability, make some vars case insensitive by forcing them to
	# lower case.
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')
		settings['FEATURES'] = ' '.join(sorted(settings.features))
		settings.backup_changes("FEATURES")

	# Normalize CLEAN_DELAY: reject non-integer values with a message.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same treatment for EMERGE_WARNING_DELAY (default 10).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	# Propagate relevant command-line switches into the config.
	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
	# PORTAGE_DEBUG must parse as 0 or 1; anything else is rejected.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	# --debug on the command line also enables PORTAGE_DEBUG.
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
	# Apply the process priority configured via PORTAGE_NICENESS.
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		# Renicing can fail (OSError, e.g. insufficient permission) or
		# the configured value may not be an integer; report and continue.
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	# Run the user-configured PORTAGE_IONICE_COMMAND (if any), with
	# ${PID} in the command string expanded to the current process id.
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
		ionice_cmd = shlex.split(ionice_cmd)

	from portage.util import varexpand
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

		rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	if rval != os.EX_OK:
		# A non-zero exit status usually indicates a bad command line in
		# make.conf; point the user at the documentation.
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	# Report that `set_name` does not resolve to any configured package
	# set, then list the sets that do exist on this root.
	# NOTE(review): the initialization of `msg` is not visible here.
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))

	for s in sorted(root_config.sets):
		msg.append(" %s" % s)

	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand SETPREFIX ("@") set references in the argument list.

	Handles inline per-set options ("@set{key=val,...}"), simple set
	algebra (intersection/difference/union operators) and plain set
	expansion.  Returns a (newargs, retval) tuple where retval is an
	exit code.
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
		if a in ("system", "world"):
			newargs.append(SETPREFIX+a)

	# separators for set arguments

	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# Parse and strip inline "{...}" option blocks, feeding the
	# key=value pairs to setconfig before expansion.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]

				# TODO: implement proper quoting
				args = argpart.split(",")
					k, v = a.split("=", 1)
					# Bare words (no "=") act as boolean flags.
					options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occurred while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existence of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
			missing_sets.append(s)

	# Build a human-readable list of the missing set names.
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
		missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
	msg.append(" sets defined: %s" % ", ".join(sets))
	msg.append(" This usually means that '%s'" % \
		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
	msg.append(" is missing or corrupt.")
		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

		if a.startswith(SETPREFIX):
			# support simple set operations (intersection, difference and union)
			# on the commandline. Expressions are evaluated strictly left-to-right
			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
				expression = a[len(SETPREFIX):]
				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
					# Split on the right-most operator; the remaining left
					# part is processed on the next loop iteration.
					is_pos = expression.rfind(IS_OPERATOR)
					diff_pos = expression.rfind(DIFF_OPERATOR)
					union_pos = expression.rfind(UNION_OPERATOR)
					op_pos = max(is_pos, diff_pos, union_pos)
					s1 = expression[:op_pos]
					s2 = expression[op_pos+len(IS_OPERATOR):]
					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
						display_missing_pkg_set(root_config, s2)
					expr_sets.insert(0, s2)
					expr_ops.insert(0, op)
				if not expression in sets:
					display_missing_pkg_set(root_config, expression)
				expr_sets.insert(0, expression)
				# Fold the operator chain left-to-right over the set atoms.
				result = set(setconfig.getSetAtoms(expression))
				for i in range(0, len(expr_ops)):
					s2 = setconfig.getSetAtoms(expr_sets[i+1])
					if expr_ops[i] == IS_OPERATOR:
						result.intersection_update(s2)
					elif expr_ops[i] == DIFF_OPERATOR:
						result.difference_update(s2)
					elif expr_ops[i] == UNION_OPERATOR:
						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
				newargs.extend(result)
			s = a[len(SETPREFIX):]
			display_missing_pkg_set(root_config, s)
			setconfig.active.append(s)
				set_atoms = setconfig.getSetAtoms(s)
			except portage.exception.PackageSetNotFound, e:
				writemsg_level(("emerge: the given set '%s' " + \
					"contains a non-existent set named '%s'.\n") % \
					(s, e), level=logging.ERROR, noiselevel=-1)
			if myaction in unmerge_actions and \
				not sets[s].supportsOperation("unmerge"):
				sys.stderr.write("emerge: the given set '%s' does " % s + \
					"not support unmerge operations\n")
			elif not set_atoms:
				print "emerge: '%s' is an empty set" % s
			elif myaction not in do_not_expand:
				newargs.extend(set_atoms)
				newargs.append(SETPREFIX+s)
			for e in sets[s].errors:

	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories lacking a profiles/repo_name entry.

	Returns True when at least one configured repository has no name.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from every configured tree and discard those whose
			# repository name is known.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
				missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def repo_name_duplicate_check(trees):
	"""Warn about overlays ignored due to duplicate repo_name entries.

	Returns True when any such duplicates were found.
	"""
	for root, root_trees in trees.iteritems():
		if 'porttree' in root_trees:
			portdb = root_trees['porttree'].dbapi
			# PORTAGE_REPO_DUPLICATE_WARN='0' silences this check.
			if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
				for repo_name, paths in portdb._ignored_repos:
					k = (root, repo_name, portdb.getRepositoryPath(repo_name))
					ignored_repos.setdefault(k, []).extend(paths)

		msg.append('WARNING: One or more repositories ' + \
			'have been ignored due to duplicate')
		msg.append(' profiles/repo_name entries:')
		for k in sorted(ignored_repos):
			msg.append(' %s overrides' % (k,))
			for path in ignored_repos[k]:
				msg.append(' %s' % (path,))
		msg.extend(' ' + x for x in textwrap.wrap(
			"All profiles/repo_name entries must be unique in order " + \
			"to avoid having duplicates ignored. " + \
			"Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
			"/etc/make.conf if you would like to disable this warning."))
		writemsg_level(''.join('%s\n' % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(ignored_repos)
def config_protect_check(trees):
	# Emit a warning for each root whose CONFIG_PROTECT setting is empty.
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	# Inform the user that the short name `arg` matches several packages
	# (the category/package keys derived from `atoms`).  In quiet mode
	# just list the candidates; otherwise run a search to show details
	# for each matching category/package.
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):

	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	# Verify every root has a usable profile.  Per the error text below,
	# only --help, --info, --sync and --version may proceed without one.
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
	# generate some profile related warning messages
	validate_ebuild_environment(trees)
	msg = "If you have just changed your profile configuration, you " + \
		"should revert back to the previous configuration. Due to " + \
		"your current profile being invalid, allowed actions are " + \
		"limited to --help, --info, --sync, and --version."
	writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
		level=logging.ERROR, noiselevel=-1)
10300 global portage # NFC why this is necessary now - genone
10301 portage._disable_legacy_globals()
10302 # Disable color until we're sure that it should be enabled (after
10303 # EMERGE_DEFAULT_OPTS has been parsed).
10304 portage.output.havecolor = 0
10305 # This first pass is just for options that need to be known as early as
10306 # possible, such as --config-root. They will be parsed again later,
10307 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
10308 # the value of --config-root).
10309 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
10310 if "--debug" in myopts:
10311 os.environ["PORTAGE_DEBUG"] = "1"
10312 if "--config-root" in myopts:
10313 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
10314 if "--root" in myopts:
10315 os.environ["ROOT"] = myopts["--root"]
10317 # Portage needs to ensure a sane umask for the files it creates.
10319 settings, trees, mtimedb = load_emerge_config()
10320 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10321 rval = profile_check(trees, myaction, myopts)
10322 if rval != os.EX_OK:
10325 if portage._global_updates(trees, mtimedb["updates"]):
10327 # Reload the whole config from scratch.
10328 settings, trees, mtimedb = load_emerge_config(trees=trees)
10329 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10331 xterm_titles = "notitles" not in settings.features
10334 if "--ignore-default-opts" not in myopts:
10335 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
10336 tmpcmdline.extend(sys.argv[1:])
10337 myaction, myopts, myfiles = parse_opts(tmpcmdline)
10339 if "--digest" in myopts:
10340 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
10341 # Reload the whole config from scratch so that the portdbapi internal
10342 # config is updated with new FEATURES.
10343 settings, trees, mtimedb = load_emerge_config(trees=trees)
10344 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10346 for myroot in trees:
10347 mysettings = trees[myroot]["vartree"].settings
10348 mysettings.unlock()
10349 adjust_config(myopts, mysettings)
10350 if '--pretend' not in myopts and myaction in \
10351 (None, 'clean', 'depclean', 'prune', 'unmerge'):
10352 mysettings["PORTAGE_COUNTER_HASH"] = \
10353 trees[myroot]["vartree"].dbapi._counter_hash()
10354 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
10356 del myroot, mysettings
10358 apply_priorities(settings)
10360 spinner = stdout_spinner()
10361 if "candy" in settings.features:
10362 spinner.update = spinner.update_scroll
10364 if "--quiet" not in myopts:
10365 portage.deprecated_profile_check(settings=settings)
10366 repo_name_check(trees)
10367 repo_name_duplicate_check(trees)
10368 config_protect_check(trees)
10370 for mytrees in trees.itervalues():
10371 mydb = mytrees["porttree"].dbapi
10372 # Freeze the portdbapi for performance (memoize all xmatch results).
10376 if "moo" in myfiles:
10379 Larry loves Gentoo (""" + platform.system() + """)
10381 _______________________
10382 < Have you mooed today? >
10383 -----------------------
10393 ext = os.path.splitext(x)[1]
10394 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
10395 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
10398 root_config = trees[settings["ROOT"]]["root_config"]
10399 if myaction == "list-sets":
10400 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
10404 # only expand sets for actions taking package arguments
10405 oldargs = myfiles[:]
10406 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
10407 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
10408 if retval != os.EX_OK:
10411 # Need to handle empty sets specially, otherwise emerge will react
10412 # with the help message for empty argument lists
10413 if oldargs and not myfiles:
10414 print "emerge: no targets left after set expansion"
10417 if ("--tree" in myopts) and ("--columns" in myopts):
10418 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
10421 if ("--quiet" in myopts):
10422 spinner.update = spinner.update_quiet
10423 portage.util.noiselimit = -1
10425 # Always create packages if FEATURES=buildpkg
10426 # Imply --buildpkg if --buildpkgonly
10427 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
10428 if "--buildpkg" not in myopts:
10429 myopts["--buildpkg"] = True
10431 # Always try and fetch binary packages if FEATURES=getbinpkg
10432 if ("getbinpkg" in settings.features):
10433 myopts["--getbinpkg"] = True
10435 if "--buildpkgonly" in myopts:
10436 # --buildpkgonly will not merge anything, so
10437 # it cancels all binary package options.
10438 for opt in ("--getbinpkg", "--getbinpkgonly",
10439 "--usepkg", "--usepkgonly"):
10440 myopts.pop(opt, None)
10442 if "--fetch-all-uri" in myopts:
10443 myopts["--fetchonly"] = True
10445 if "--skipfirst" in myopts and "--resume" not in myopts:
10446 myopts["--resume"] = True
10448 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
10449 myopts["--usepkgonly"] = True
10451 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
10452 myopts["--getbinpkg"] = True
10454 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
10455 myopts["--usepkg"] = True
10457 # Also allow -K to apply --usepkg/-k
10458 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
10459 myopts["--usepkg"] = True
10461 # Allow -p to remove --ask
10462 if "--pretend" in myopts:
10463 myopts.pop("--ask", None)
10465 # forbid --ask when not in a terminal
10466 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
10467 if ("--ask" in myopts) and (not sys.stdin.isatty()):
10468 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
10472 if settings.get("PORTAGE_DEBUG", "") == "1":
10473 spinner.update = spinner.update_quiet
10475 if "python-trace" in settings.features:
10476 import portage.debug
10477 portage.debug.set_trace(True)
10479 if not ("--quiet" in myopts):
10480 if not sys.stdout.isatty() or ("--nospinner" in myopts):
10481 spinner.update = spinner.update_basic
10483 if myaction == 'version':
10484 print getportageversion(settings["PORTDIR"], settings["ROOT"],
10485 settings.profile_path, settings["CHOST"],
10486 trees[settings["ROOT"]]["vartree"].dbapi)
10488 elif "--help" in myopts:
10489 _emerge.help.help(myaction, myopts, portage.output.havecolor)
10492 if "--debug" in myopts:
10493 print "myaction", myaction
10494 print "myopts", myopts
10496 if not myaction and not myfiles and "--resume" not in myopts:
10497 _emerge.help.help(myaction, myopts, portage.output.havecolor)
10500 pretend = "--pretend" in myopts
10501 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
10502 buildpkgonly = "--buildpkgonly" in myopts
10504 # check if root user is the current user for the actions where emerge needs this
10505 if portage.secpass < 2:
10506 # We've already allowed "--version" and "--help" above.
10507 if "--pretend" not in myopts and myaction not in ("search","info"):
10508 need_superuser = myaction in ('clean', 'depclean', 'deselect',
10509 'prune', 'unmerge') or not \
10511 (buildpkgonly and secpass >= 1) or \
10512 myaction in ("metadata", "regen") or \
10513 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
10514 if portage.secpass < 1 or \
10517 access_desc = "superuser"
10519 access_desc = "portage group"
10520 # Always show portage_group_warning() when only portage group
10521 # access is required but the user is not in the portage group.
10522 from portage.data import portage_group_warning
10523 if "--ask" in myopts:
10524 myopts["--pretend"] = True
10525 del myopts["--ask"]
10526 print ("%s access is required... " + \
10527 "adding --pretend to options\n") % access_desc
10528 if portage.secpass < 1 and not need_superuser:
10529 portage_group_warning()
10531 sys.stderr.write(("emerge: %s access is required\n") \
10533 if portage.secpass < 1 and not need_superuser:
10534 portage_group_warning()
10537 disable_emergelog = False
10538 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
10540 disable_emergelog = True
10542 if myaction in ("search", "info"):
10543 disable_emergelog = True
10544 if disable_emergelog:
10545 """ Disable emergelog for everything except build or unmerge
10546 operations. This helps minimize parallel emerge.log entries that can
10547 confuse log parsers. We especially want it disabled during
10548 parallel-fetch, which uses --resume --fetchonly."""
10550 def emergelog(*pargs, **kargs):
10554 if 'EMERGE_LOG_DIR' in settings:
10556 # At least the parent needs to exist for the lock file.
10557 portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
10558 except portage.exception.PortageException, e:
10559 writemsg_level("!!! Error creating directory for " + \
10560 "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
10561 (settings['EMERGE_LOG_DIR'], e),
10562 noiselevel=-1, level=logging.ERROR)
10564 global _emerge_log_dir
10565 _emerge_log_dir = settings['EMERGE_LOG_DIR']
10567 if not "--pretend" in myopts:
10568 emergelog(xterm_titles, "Started emerge on: "+\
10569 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
10572 myelogstr=" ".join(myopts)
10574 myelogstr+=" "+myaction
10576 myelogstr += " " + " ".join(oldargs)
10577 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""SIGINT/SIGTERM handler: block further termination signals,
	announce the signal, and exit with the conventional 100+signum
	status."""
	for caught in (signal.SIGINT, signal.SIGTERM):
		signal.signal(caught, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
10585 signal.signal(signal.SIGINT, emergeexitsig)
10586 signal.signal(signal.SIGTERM, emergeexitsig)
10589 """This gets out final log message in before we quit."""
10590 if "--pretend" not in myopts:
10591 emergelog(xterm_titles, " *** terminating.")
10592 if "notitles" not in settings.features:
10594 portage.atexit_register(emergeexit)
10596 if myaction in ("config", "metadata", "regen", "sync"):
10597 if "--pretend" in myopts:
10598 sys.stderr.write(("emerge: The '%s' action does " + \
10599 "not support '--pretend'.\n") % myaction)
10602 if "sync" == myaction:
10603 return action_sync(settings, trees, mtimedb, myopts, myaction)
10604 elif "metadata" == myaction:
10605 action_metadata(settings, portdb, myopts)
10606 elif myaction=="regen":
10607 validate_ebuild_environment(trees)
10608 return action_regen(settings, portdb, myopts.get("--jobs"),
10609 myopts.get("--load-average"))
10611 elif "config"==myaction:
10612 validate_ebuild_environment(trees)
10613 action_config(settings, trees, myopts, myfiles)
10616 elif "search"==myaction:
10617 validate_ebuild_environment(trees)
10618 action_search(trees[settings["ROOT"]]["root_config"],
10619 myopts, myfiles, spinner)
10621 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
10622 validate_ebuild_environment(trees)
10623 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
10624 myopts, myaction, myfiles, spinner)
10625 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
10626 post_emerge(root_config, myopts, mtimedb, rval)
10629 elif myaction == 'info':
10631 # Ensure atoms are valid before calling unmerge().
10632 vardb = trees[settings["ROOT"]]["vartree"].dbapi
10635 if is_valid_package_atom(x):
10637 valid_atoms.append(
10638 portage.dep_expand(x, mydb=vardb, settings=settings))
10639 except portage.exception.AmbiguousPackageName, e:
10640 msg = "The short ebuild name \"" + x + \
10641 "\" is ambiguous. Please specify " + \
10642 "one of the following " + \
10643 "fully-qualified ebuild names instead:"
10644 for line in textwrap.wrap(msg, 70):
10645 writemsg_level("!!! %s\n" % (line,),
10646 level=logging.ERROR, noiselevel=-1)
10648 writemsg_level(" %s\n" % colorize("INFORM", i),
10649 level=logging.ERROR, noiselevel=-1)
10650 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
10654 msg.append("'%s' is not a valid package atom." % (x,))
10655 msg.append("Please check ebuild(5) for full details.")
10656 writemsg_level("".join("!!! %s\n" % line for line in msg),
10657 level=logging.ERROR, noiselevel=-1)
10660 return action_info(settings, trees, myopts, valid_atoms)
10662 # "update", "system", or just process files:
10664 validate_ebuild_environment(trees)
10667 if x.startswith(SETPREFIX) or \
10668 is_valid_package_atom(x):
10670 if x[:1] == os.sep:
10678 msg.append("'%s' is not a valid package atom." % (x,))
10679 msg.append("Please check ebuild(5) for full details.")
10680 writemsg_level("".join("!!! %s\n" % line for line in msg),
10681 level=logging.ERROR, noiselevel=-1)
10684 if "--pretend" not in myopts:
10685 display_news_notification(root_config, myopts)
10686 retval = action_build(settings, trees, mtimedb,
10687 myopts, myaction, myfiles, spinner)
10688 root_config = trees[settings["ROOT"]]["root_config"]
10689 post_emerge(root_config, myopts, mtimedb, retval)