2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
22 from os import path as osp
23 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
26 from portage import digraph
27 from portage.const import NEWS_LIB_PATH
30 import portage.xpak, commands, errno, re, socket, time
31 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
32 nc_len, red, teal, turquoise, xtermTitle, \
33 xtermTitleReset, yellow
34 from portage.output import create_color_func
35 good = create_color_func("GOOD")
36 bad = create_color_func("BAD")
37 # white looks bad on terminals with white background
38 from portage.output import bold as white
42 portage.dep._dep_check_strict = True
45 import portage.exception
46 from portage.cache.cache_errors import CacheError
47 from portage.data import secpass
48 from portage.elog.messages import eerror
49 from portage.util import normalize_path as normpath
50 from portage.util import cmp_sort_key, writemsg, writemsg_level
51 from portage.sets import load_default_config, SETPREFIX
52 from portage.sets.base import InternalPackageSet
54 from itertools import chain, izip
56 from _emerge.SlotObject import SlotObject
57 from _emerge.DepPriority import DepPriority
58 from _emerge.BlockerDepPriority import BlockerDepPriority
59 from _emerge.UnmergeDepPriority import UnmergeDepPriority
60 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
61 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
62 from _emerge.Task import Task
63 from _emerge.Blocker import Blocker
64 from _emerge.PollConstants import PollConstants
65 from _emerge.AsynchronousTask import AsynchronousTask
66 from _emerge.CompositeTask import CompositeTask
67 from _emerge.EbuildFetcher import EbuildFetcher
68 from _emerge.EbuildBuild import EbuildBuild
69 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
70 from _emerge.EbuildPhase import EbuildPhase
71 from _emerge.Binpkg import Binpkg
72 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
73 from _emerge.PackageMerge import PackageMerge
74 from _emerge.DependencyArg import DependencyArg
75 from _emerge.AtomArg import AtomArg
76 from _emerge.PackageArg import PackageArg
77 from _emerge.SetArg import SetArg
78 from _emerge.Dependency import Dependency
79 from _emerge.BlockerCache import BlockerCache
80 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
81 from _emerge.RepoDisplay import RepoDisplay
82 from _emerge.UseFlagDisplay import UseFlagDisplay
83 from _emerge.PollSelectAdapter import PollSelectAdapter
84 from _emerge.SequentialTaskQueue import SequentialTaskQueue
85 from _emerge.ProgressHandler import ProgressHandler
86 from _emerge.stdout_spinner import stdout_spinner
87 from _emerge.UninstallFailure import UninstallFailure
88 from _emerge.JobStatusDisplay import JobStatusDisplay
89 from _emerge.getloadavg import getloadavg
91 def userquery(prompt, responses=None, colours=None):
92 """Displays a prompt and a set of responses, then waits for a response
93 which is checked against the responses and the first to match is
94 returned. An empty response will match the first value in responses. The
95 input buffer is *not* cleared prior to the prompt!
98 responses: a List of Strings.
99 colours: a List of Functions taking and returning a String, used to
100 process the responses for display. Typically these will be functions
101 like red() but could be e.g. lambda x: "DisplayString".
102 If responses is omitted, defaults to ["Yes", "No"], [green, red].
103 If only colours is omitted, defaults to [bold, ...].
105 Returns a member of the List responses. (If called without optional
106 arguments, returns "Yes" or "No".)
107 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
109 if responses is None:
110 responses = ["Yes", "No"]
112 create_color_func("PROMPT_CHOICE_DEFAULT"),
113 create_color_func("PROMPT_CHOICE_OTHER")
115 elif colours is None:
117 colours=(colours*len(responses))[:len(responses)]
121 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
122 for key in responses:
123 # An empty response will match the first value in responses.
124 if response.upper()==key[:len(response)].upper():
126 print "Sorry, response '%s' not understood." % response,
127 except (EOFError, KeyboardInterrupt):
131 actions = frozenset([
132 "clean", "config", "depclean",
133 "info", "list-sets", "metadata",
134 "prune", "regen", "search",
135 "sync", "unmerge", "version",
138 "--ask", "--alphabetical",
139 "--buildpkg", "--buildpkgonly",
140 "--changelog", "--columns",
145 "--fetchonly", "--fetch-all-uri",
146 "--getbinpkg", "--getbinpkgonly",
147 "--help", "--ignore-default-opts",
151 "--nodeps", "--noreplace",
152 "--nospinner", "--oneshot",
153 "--onlydeps", "--pretend",
154 "--quiet", "--resume",
155 "--searchdesc", "--selective",
159 "--usepkg", "--usepkgonly",
166 "b":"--buildpkg", "B":"--buildpkgonly",
167 "c":"--clean", "C":"--unmerge",
168 "d":"--debug", "D":"--deep",
170 "f":"--fetchonly", "F":"--fetch-all-uri",
171 "g":"--getbinpkg", "G":"--getbinpkgonly",
173 "k":"--usepkg", "K":"--usepkgonly",
175 "n":"--noreplace", "N":"--newuse",
176 "o":"--onlydeps", "O":"--nodeps",
177 "p":"--pretend", "P":"--prune",
179 "s":"--search", "S":"--searchdesc",
182 "v":"--verbose", "V":"--version"
185 _emerge_log_dir = '/var/log'
187 def emergelog(xterm_titles, mystr, short_msg=None):
188 if xterm_titles and short_msg:
189 if "HOSTNAME" in os.environ:
190 short_msg = os.environ["HOSTNAME"]+": "+short_msg
191 xtermTitle(short_msg)
193 file_path = os.path.join(_emerge_log_dir, 'emerge.log')
194 mylogfile = open(file_path, "a")
195 portage.util.apply_secpass_permissions(file_path,
196 uid=portage.portage_uid, gid=portage.portage_gid,
200 mylock = portage.locks.lockfile(mylogfile)
201 # seek because we may have gotten held up by the lock.
202 # if so, we may not be positioned at the end of the file.
204 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
208 portage.locks.unlockfile(mylock)
210 except (IOError,OSError,portage.exception.PortageException), e:
212 print >> sys.stderr, "emergelog():",e
214 def countdown(secs=5, doing="Starting"):
216 print ">>> Waiting",secs,"seconds before starting..."
217 print ">>> (Control-C to abort)...\n"+doing+" in: ",
221 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
226 # formats a size given in bytes nicely
227 def format_size(mysize):
228 if isinstance(mysize, basestring):
230 if 0 != mysize % 1024:
231 # Always round up to the next kB so that it doesn't show 0 kB when
232 # some small file still needs to be fetched.
233 mysize += 1024 - mysize % 1024
234 mystr=str(mysize/1024)
238 mystr=mystr[:mycount]+","+mystr[mycount:]
242 def getgccversion(chost):
245 return: the current in-use gcc version
248 gcc_ver_command = 'gcc -dumpversion'
249 gcc_ver_prefix = 'gcc-'
251 gcc_not_found_error = red(
252 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
253 "!!! to update the environment of this terminal and possibly\n" +
254 "!!! other terminals also.\n"
257 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
258 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
259 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
261 mystatus, myoutput = commands.getstatusoutput(
262 chost + "-" + gcc_ver_command)
263 if mystatus == os.EX_OK:
264 return gcc_ver_prefix + myoutput
266 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
267 if mystatus == os.EX_OK:
268 return gcc_ver_prefix + myoutput
270 portage.writemsg(gcc_not_found_error, noiselevel=-1)
271 return "[unavailable]"
273 def getportageversion(portdir, target_root, profile, chost, vardb):
274 profilever = "unavailable"
276 realpath = os.path.realpath(profile)
277 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
278 if realpath.startswith(basepath):
279 profilever = realpath[1 + len(basepath):]
282 profilever = "!" + os.readlink(profile)
285 del realpath, basepath
288 libclist = vardb.match("virtual/libc")
289 libclist += vardb.match("virtual/glibc")
290 libclist = portage.util.unique_array(libclist)
292 xs=portage.catpkgsplit(x)
294 libcver+=","+"-".join(xs[1:])
296 libcver="-".join(xs[1:])
298 libcver="unavailable"
300 gccver = getgccversion(chost)
301 unameout=platform.release()+" "+platform.machine()
303 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
305 def create_depgraph_params(myopts, myaction):
306 #configure emerge engine parameters
308 # self: include _this_ package regardless of if it is merged.
309 # selective: exclude the package if it is merged
310 # recurse: go into the dependencies
311 # deep: go into the dependencies of already merged packages
312 # empty: pretend nothing is merged
313 # complete: completely account for all known dependencies
314 # remove: build graph for use in removing packages
315 myparams = set(["recurse"])
317 if myaction == "remove":
318 myparams.add("remove")
319 myparams.add("complete")
322 if "--update" in myopts or \
323 "--newuse" in myopts or \
324 "--reinstall" in myopts or \
325 "--noreplace" in myopts:
326 myparams.add("selective")
327 if "--emptytree" in myopts:
328 myparams.add("empty")
329 myparams.discard("selective")
330 if "--nodeps" in myopts:
331 myparams.discard("recurse")
332 if "--deep" in myopts:
334 if "--complete-graph" in myopts:
335 myparams.add("complete")
338 # search functionality
339 class search(object):
350 def __init__(self, root_config, spinner, searchdesc,
351 verbose, usepkg, usepkgonly):
352 """Searches the available and installed packages for the supplied search key.
353 The list of available and installed packages is created at object instantiation.
354 This makes successive searches faster."""
355 self.settings = root_config.settings
356 self.vartree = root_config.trees["vartree"]
357 self.spinner = spinner
358 self.verbose = verbose
359 self.searchdesc = searchdesc
360 self.root_config = root_config
361 self.setconfig = root_config.setconfig
362 self.matches = {"pkg" : []}
367 self.portdb = fake_portdb
368 for attrib in ("aux_get", "cp_all",
369 "xmatch", "findname", "getFetchMap"):
370 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
374 portdb = root_config.trees["porttree"].dbapi
375 bindb = root_config.trees["bintree"].dbapi
376 vardb = root_config.trees["vartree"].dbapi
378 if not usepkgonly and portdb._have_root_eclass_dir:
379 self._dbs.append(portdb)
381 if (usepkg or usepkgonly) and bindb.cp_all():
382 self._dbs.append(bindb)
384 self._dbs.append(vardb)
385 self._portdb = portdb
390 cp_all.update(db.cp_all())
391 return list(sorted(cp_all))
393 def _aux_get(self, *args, **kwargs):
396 return db.aux_get(*args, **kwargs)
401 def _findname(self, *args, **kwargs):
403 if db is not self._portdb:
404 # We don't want findname to return anything
405 # unless it's an ebuild in a portage tree.
406 # Otherwise, it's already built and we don't
409 func = getattr(db, "findname", None)
411 value = func(*args, **kwargs)
416 def _getFetchMap(self, *args, **kwargs):
418 func = getattr(db, "getFetchMap", None)
420 value = func(*args, **kwargs)
425 def _visible(self, db, cpv, metadata):
426 installed = db is self.vartree.dbapi
427 built = installed or db is not self._portdb
430 pkg_type = "installed"
433 return visible(self.settings,
434 Package(type_name=pkg_type, root_config=self.root_config,
435 cpv=cpv, built=built, installed=installed, metadata=metadata))
437 def _xmatch(self, level, atom):
439 This method does not expand old-style virtuals because it
440 is restricted to returning matches for a single ${CATEGORY}/${PN}
441 and old-style virual matches unreliable for that when querying
442 multiple package databases. If necessary, old-style virtuals
443 can be performed on atoms prior to calling this method.
445 cp = portage.dep_getkey(atom)
446 if level == "match-all":
449 if hasattr(db, "xmatch"):
450 matches.update(db.xmatch(level, atom))
452 matches.update(db.match(atom))
453 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
454 db._cpv_sort_ascending(result)
455 elif level == "match-visible":
458 if hasattr(db, "xmatch"):
459 matches.update(db.xmatch(level, atom))
461 db_keys = list(db._aux_cache_keys)
462 for cpv in db.match(atom):
463 metadata = izip(db_keys,
464 db.aux_get(cpv, db_keys))
465 if not self._visible(db, cpv, metadata):
468 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
469 db._cpv_sort_ascending(result)
470 elif level == "bestmatch-visible":
473 if hasattr(db, "xmatch"):
474 cpv = db.xmatch("bestmatch-visible", atom)
475 if not cpv or portage.cpv_getkey(cpv) != cp:
477 if not result or cpv == portage.best([cpv, result]):
480 db_keys = Package.metadata_keys
481 # break out of this loop with highest visible
482 # match, checked in descending order
483 for cpv in reversed(db.match(atom)):
484 if portage.cpv_getkey(cpv) != cp:
486 metadata = izip(db_keys,
487 db.aux_get(cpv, db_keys))
488 if not self._visible(db, cpv, metadata):
490 if not result or cpv == portage.best([cpv, result]):
494 raise NotImplementedError(level)
497 def execute(self,searchkey):
498 """Performs the search for the supplied search key"""
500 self.searchkey=searchkey
501 self.packagematches = []
504 self.matches = {"pkg":[], "desc":[], "set":[]}
507 self.matches = {"pkg":[], "set":[]}
508 print "Searching... ",
511 if self.searchkey.startswith('%'):
513 self.searchkey = self.searchkey[1:]
514 if self.searchkey.startswith('@'):
516 self.searchkey = self.searchkey[1:]
518 self.searchre=re.compile(self.searchkey,re.I)
520 self.searchre=re.compile(re.escape(self.searchkey), re.I)
521 for package in self.portdb.cp_all():
522 self.spinner.update()
525 match_string = package[:]
527 match_string = package.split("/")[-1]
530 if self.searchre.search(match_string):
531 if not self.portdb.xmatch("match-visible", package):
533 self.matches["pkg"].append([package,masked])
534 elif self.searchdesc: # DESCRIPTION searching
535 full_package = self.portdb.xmatch("bestmatch-visible", package)
537 #no match found; we don't want to query description
538 full_package = portage.best(
539 self.portdb.xmatch("match-all", package))
545 full_desc = self.portdb.aux_get(
546 full_package, ["DESCRIPTION"])[0]
548 print "emerge: search: aux_get() failed, skipping"
550 if self.searchre.search(full_desc):
551 self.matches["desc"].append([full_package,masked])
553 self.sdict = self.setconfig.getSets()
554 for setname in self.sdict:
555 self.spinner.update()
557 match_string = setname
559 match_string = setname.split("/")[-1]
561 if self.searchre.search(match_string):
562 self.matches["set"].append([setname, False])
563 elif self.searchdesc:
564 if self.searchre.search(
565 self.sdict[setname].getMetadata("DESCRIPTION")):
566 self.matches["set"].append([setname, False])
569 for mtype in self.matches:
570 self.matches[mtype].sort()
571 self.mlen += len(self.matches[mtype])
574 if not self.portdb.xmatch("match-all", cp):
577 if not self.portdb.xmatch("bestmatch-visible", cp):
579 self.matches["pkg"].append([cp, masked])
583 """Outputs the results of the search."""
584 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
585 print "[ Applications found : "+white(str(self.mlen))+" ]"
587 vardb = self.vartree.dbapi
588 for mtype in self.matches:
589 for match,masked in self.matches[mtype]:
593 full_package = self.portdb.xmatch(
594 "bestmatch-visible", match)
596 #no match found; we don't want to query description
598 full_package = portage.best(
599 self.portdb.xmatch("match-all",match))
600 elif mtype == "desc":
602 match = portage.cpv_getkey(match)
604 print green("*")+" "+white(match)
605 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
609 desc, homepage, license = self.portdb.aux_get(
610 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
612 print "emerge: search: aux_get() failed, skipping"
615 print green("*")+" "+white(match)+" "+red("[ Masked ]")
617 print green("*")+" "+white(match)
618 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
622 mycat = match.split("/")[0]
623 mypkg = match.split("/")[1]
624 mycpv = match + "-" + myversion
625 myebuild = self.portdb.findname(mycpv)
627 pkgdir = os.path.dirname(myebuild)
628 from portage import manifest
629 mf = manifest.Manifest(
630 pkgdir, self.settings["DISTDIR"])
632 uri_map = self.portdb.getFetchMap(mycpv)
633 except portage.exception.InvalidDependString, e:
634 file_size_str = "Unknown (%s)" % (e,)
638 mysum[0] = mf.getDistfilesSize(uri_map)
640 file_size_str = "Unknown (missing " + \
641 "digest for %s)" % (e,)
646 if db is not vardb and \
647 db.cpv_exists(mycpv):
649 if not myebuild and hasattr(db, "bintree"):
650 myebuild = db.bintree.getname(mycpv)
652 mysum[0] = os.stat(myebuild).st_size
657 if myebuild and file_size_str is None:
658 mystr = str(mysum[0] / 1024)
662 mystr = mystr[:mycount] + "," + mystr[mycount:]
663 file_size_str = mystr + " kB"
667 print " ", darkgreen("Latest version available:"),myversion
668 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
671 (darkgreen("Size of files:"), file_size_str)
672 print " ", darkgreen("Homepage:")+" ",homepage
673 print " ", darkgreen("Description:")+" ",desc
674 print " ", darkgreen("License:")+" ",license
679 def getInstallationStatus(self,package):
680 installed_package = self.vartree.dep_bestmatch(package)
682 version = self.getVersion(installed_package,search.VERSION_RELEASE)
684 result = darkgreen("Latest version installed:")+" "+version
686 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
689 def getVersion(self,full_package,detail):
690 if len(full_package) > 1:
691 package_parts = portage.catpkgsplit(full_package)
692 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
693 result = package_parts[2]+ "-" + package_parts[3]
695 result = package_parts[2]
700 class RootConfig(object):
701 """This is used internally by depgraph to track information about a
705 "ebuild" : "porttree",
706 "binary" : "bintree",
707 "installed" : "vartree"
711 for k, v in pkg_tree_map.iteritems():
714 def __init__(self, settings, trees, setconfig):
716 self.settings = settings
717 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
718 self.root = self.settings["ROOT"]
719 self.setconfig = setconfig
720 if setconfig is None:
723 self.sets = self.setconfig.getSets()
724 self.visible_pkgs = PackageVirtualDbapi(self.settings)
726 def create_world_atom(pkg, args_set, root_config):
727 """Create a new atom for the world file if one does not exist. If the
728 argument atom is precise enough to identify a specific slot then a slot
729 atom will be returned. Atoms that are in the system set may also be stored
730 in world since system atoms can only match one slot while world atoms can
731 be greedy with respect to slots. Unslotted system packages will not be
734 arg_atom = args_set.findAtomForPackage(pkg)
737 cp = portage.dep_getkey(arg_atom)
739 sets = root_config.sets
740 portdb = root_config.trees["porttree"].dbapi
741 vardb = root_config.trees["vartree"].dbapi
742 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
743 for cpv in portdb.match(cp))
744 slotted = len(available_slots) > 1 or \
745 (len(available_slots) == 1 and "0" not in available_slots)
747 # check the vdb in case this is multislot
748 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
749 for cpv in vardb.match(cp))
750 slotted = len(available_slots) > 1 or \
751 (len(available_slots) == 1 and "0" not in available_slots)
752 if slotted and arg_atom != cp:
753 # If the user gave a specific atom, store it as a
754 # slot atom in the world file.
755 slot_atom = pkg.slot_atom
757 # For USE=multislot, there are a couple of cases to
760 # 1) SLOT="0", but the real SLOT spontaneously changed to some
761 # unknown value, so just record an unslotted atom.
763 # 2) SLOT comes from an installed package and there is no
764 # matching SLOT in the portage tree.
766 # Make sure that the slot atom is available in either the
767 # portdb or the vardb, since otherwise the user certainly
768 # doesn't want the SLOT atom recorded in the world file
769 # (case 1 above). If it's only available in the vardb,
770 # the user may be trying to prevent a USE=multislot
771 # package from being removed by --depclean (case 2 above).
774 if not portdb.match(slot_atom):
775 # SLOT seems to come from an installed multislot package
777 # If there is no installed package matching the SLOT atom,
778 # it probably changed SLOT spontaneously due to USE=multislot,
779 # so just record an unslotted atom.
780 if vardb.match(slot_atom):
781 # Now verify that the argument is precise
782 # enough to identify a specific slot.
783 matches = mydb.match(arg_atom)
784 matched_slots = set()
786 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
787 if len(matched_slots) == 1:
788 new_world_atom = slot_atom
790 if new_world_atom == sets["world"].findAtomForPackage(pkg):
791 # Both atoms would be identical, so there's nothing to add.
794 # Unlike world atoms, system atoms are not greedy for slots, so they
795 # can't be safely excluded from world if they are slotted.
796 system_atom = sets["system"].findAtomForPackage(pkg)
798 if not portage.dep_getkey(system_atom).startswith("virtual/"):
800 # System virtuals aren't safe to exclude from world since they can
801 # match multiple old-style virtuals but only one of them will be
802 # pulled in by update or depclean.
803 providers = portdb.mysettings.getvirtuals().get(
804 portage.dep_getkey(system_atom))
805 if providers and len(providers) == 1 and providers[0] == cp:
807 return new_world_atom
809 def filter_iuse_defaults(iuse):
811 if flag.startswith("+") or flag.startswith("-"):
816 def _find_deep_system_runtime_deps(graph):
817 deep_system_deps = set()
820 if not isinstance(node, Package) or \
821 node.operation == 'uninstall':
823 if node.root_config.sets['system'].findAtomForPackage(node):
824 node_stack.append(node)
826 def ignore_priority(priority):
828 Ignore non-runtime priorities.
830 if isinstance(priority, DepPriority) and \
831 (priority.runtime or priority.runtime_post):
836 node = node_stack.pop()
837 if node in deep_system_deps:
839 deep_system_deps.add(node)
840 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
841 if not isinstance(child, Package) or \
842 child.operation == 'uninstall':
844 node_stack.append(child)
846 return deep_system_deps
848 class FakeVartree(portage.vartree):
849 """This is implements an in-memory copy of a vartree instance that provides
850 all the interfaces required for use by the depgraph. The vardb is locked
851 during the constructor call just long enough to read a copy of the
852 installed package information. This allows the depgraph to do it's
853 dependency calculations without holding a lock on the vardb. It also
854 allows things like vardb global updates to be done in memory so that the
855 user doesn't necessarily need write access to the vardb in cases where
856 global updates are necessary (updates are performed when necessary if there
857 is not a matching ebuild in the tree)."""
858 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
859 self._root_config = root_config
860 if pkg_cache is None:
862 real_vartree = root_config.trees["vartree"]
863 portdb = root_config.trees["porttree"].dbapi
864 self.root = real_vartree.root
865 self.settings = real_vartree.settings
866 mykeys = list(real_vartree.dbapi._aux_cache_keys)
867 if "_mtime_" not in mykeys:
868 mykeys.append("_mtime_")
869 self._db_keys = mykeys
870 self._pkg_cache = pkg_cache
871 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
872 vdb_path = os.path.join(self.root, portage.VDB_PATH)
874 # At least the parent needs to exist for the lock file.
875 portage.util.ensure_dirs(vdb_path)
876 except portage.exception.PortageException:
880 if acquire_lock and os.access(vdb_path, os.W_OK):
881 vdb_lock = portage.locks.lockdir(vdb_path)
882 real_dbapi = real_vartree.dbapi
884 for cpv in real_dbapi.cpv_all():
885 cache_key = ("installed", self.root, cpv, "nomerge")
886 pkg = self._pkg_cache.get(cache_key)
888 metadata = pkg.metadata
890 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
891 myslot = metadata["SLOT"]
892 mycp = portage.dep_getkey(cpv)
893 myslot_atom = "%s:%s" % (mycp, myslot)
895 mycounter = long(metadata["COUNTER"])
898 metadata["COUNTER"] = str(mycounter)
899 other_counter = slot_counters.get(myslot_atom, None)
900 if other_counter is not None:
901 if other_counter > mycounter:
903 slot_counters[myslot_atom] = mycounter
905 pkg = Package(built=True, cpv=cpv,
906 installed=True, metadata=metadata,
907 root_config=root_config, type_name="installed")
908 self._pkg_cache[pkg] = pkg
909 self.dbapi.cpv_inject(pkg)
910 real_dbapi.flush_cache()
913 portage.locks.unlockdir(vdb_lock)
914 # Populate the old-style virtuals using the cached values.
915 if not self.settings.treeVirtuals:
916 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
917 portage.getCPFromCPV, self.get_all_provides())
919 # Intialize variables needed for lazy cache pulls of the live ebuild
920 # metadata. This ensures that the vardb lock is released ASAP, without
921 # being delayed in case cache generation is triggered.
922 self._aux_get = self.dbapi.aux_get
923 self.dbapi.aux_get = self._aux_get_wrapper
924 self._match = self.dbapi.match
925 self.dbapi.match = self._match_wrapper
926 self._aux_get_history = set()
927 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
928 self._portdb = portdb
929 self._global_updates = None
931 def _match_wrapper(self, cpv, use_cache=1):
933 Make sure the metadata in Package instances gets updated for any
934 cpv that is returned from a match() call, since the metadata can
935 be accessed directly from the Package instance instead of via
938 matches = self._match(cpv, use_cache=use_cache)
940 if cpv in self._aux_get_history:
942 self._aux_get_wrapper(cpv, [])
945 def _aux_get_wrapper(self, pkg, wants):
946 if pkg in self._aux_get_history:
947 return self._aux_get(pkg, wants)
948 self._aux_get_history.add(pkg)
950 # Use the live ebuild metadata if possible.
951 live_metadata = dict(izip(self._portdb_keys,
952 self._portdb.aux_get(pkg, self._portdb_keys)))
953 if not portage.eapi_is_supported(live_metadata["EAPI"]):
955 self.dbapi.aux_update(pkg, live_metadata)
956 except (KeyError, portage.exception.PortageException):
957 if self._global_updates is None:
958 self._global_updates = \
959 grab_global_updates(self._portdb.porttree_root)
960 perform_global_updates(
961 pkg, self.dbapi, self._global_updates)
962 return self._aux_get(pkg, wants)
964 def sync(self, acquire_lock=1):
966 Call this method to synchronize state with the real vardb
967 after one or more packages may have been installed or
970 vdb_path = os.path.join(self.root, portage.VDB_PATH)
972 # At least the parent needs to exist for the lock file.
973 portage.util.ensure_dirs(vdb_path)
974 except portage.exception.PortageException:
978 if acquire_lock and os.access(vdb_path, os.W_OK):
979 vdb_lock = portage.locks.lockdir(vdb_path)
983 portage.locks.unlockdir(vdb_lock)
987 real_vardb = self._root_config.trees["vartree"].dbapi
988 current_cpv_set = frozenset(real_vardb.cpv_all())
989 pkg_vardb = self.dbapi
990 aux_get_history = self._aux_get_history
992 # Remove any packages that have been uninstalled.
993 for pkg in list(pkg_vardb):
994 if pkg.cpv not in current_cpv_set:
995 pkg_vardb.cpv_remove(pkg)
996 aux_get_history.discard(pkg.cpv)
998 # Validate counters and timestamps.
1001 validation_keys = ["COUNTER", "_mtime_"]
1002 for cpv in current_cpv_set:
1004 pkg_hash_key = ("installed", root, cpv, "nomerge")
1005 pkg = pkg_vardb.get(pkg_hash_key)
1007 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1009 counter = long(counter)
1013 if counter != pkg.counter or \
1015 pkg_vardb.cpv_remove(pkg)
1016 aux_get_history.discard(pkg.cpv)
1020 pkg = self._pkg(cpv)
1022 other_counter = slot_counters.get(pkg.slot_atom)
1023 if other_counter is not None:
1024 if other_counter > pkg.counter:
1027 slot_counters[pkg.slot_atom] = pkg.counter
1028 pkg_vardb.cpv_inject(pkg)
1030 real_vardb.flush_cache()
1032 def _pkg(self, cpv):
1033 root_config = self._root_config
1034 real_vardb = root_config.trees["vartree"].dbapi
1035 pkg = Package(cpv=cpv, installed=True,
1036 metadata=izip(self._db_keys,
1037 real_vardb.aux_get(cpv, self._db_keys)),
1038 root_config=root_config,
1039 type_name="installed")
1042 mycounter = long(pkg.metadata["COUNTER"])
1045 pkg.metadata["COUNTER"] = str(mycounter)
1049 def grab_global_updates(portdir):
1050 from portage.update import grab_updates, parse_updates
1051 updpath = os.path.join(portdir, "profiles", "updates")
1053 rawupdates = grab_updates(updpath)
1054 except portage.exception.DirectoryNotFound:
1057 for mykey, mystat, mycontent in rawupdates:
1058 commands, errors = parse_updates(mycontent)
1059 upd_commands.extend(commands)
1062 def perform_global_updates(mycpv, mydb, mycommands):
1063 from portage.update import update_dbentries
1064 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1065 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1066 updates = update_dbentries(mycommands, aux_dict)
1068 mydb.aux_update(mycpv, updates)
1070 def visible(pkgsettings, pkg):
1072 Check if a package is visible. This can raise an InvalidDependString
1073 exception if LICENSE is invalid.
1074 TODO: optionally generate a list of masking reasons
1076 @returns: True if the package is visible, False otherwise.
1078 if not pkg.metadata["SLOT"]:
1080 if not pkg.installed:
1081 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1083 eapi = pkg.metadata["EAPI"]
1084 if not portage.eapi_is_supported(eapi):
1086 if not pkg.installed:
1087 if portage._eapi_is_deprecated(eapi):
1089 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1091 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1093 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1096 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1098 except portage.exception.InvalidDependString:
1102 def get_masking_status(pkg, pkgsettings, root_config):
1104 mreasons = portage.getmaskingstatus(
1105 pkg, settings=pkgsettings,
1106 portdb=root_config.trees["porttree"].dbapi)
1108 if not pkg.installed:
1109 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1110 mreasons.append("CHOST: %s" % \
1111 pkg.metadata["CHOST"])
1113 if not pkg.metadata["SLOT"]:
1114 mreasons.append("invalid: SLOT is undefined")
1118 def get_mask_info(root_config, cpv, pkgsettings,
1119 db, pkg_type, built, installed, db_keys):
1122 metadata = dict(izip(db_keys,
1123 db.aux_get(cpv, db_keys)))
1126 if metadata and not built:
1127 pkgsettings.setcpv(cpv, mydb=metadata)
1128 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1129 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1130 if metadata is None:
1131 mreasons = ["corruption"]
1133 eapi = metadata['EAPI']
1136 if not portage.eapi_is_supported(eapi):
1137 mreasons = ['EAPI %s' % eapi]
1139 pkg = Package(type_name=pkg_type, root_config=root_config,
1140 cpv=cpv, built=built, installed=installed, metadata=metadata)
1141 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1142 return metadata, mreasons
# Print each masked package with its mask reasons, any package.mask comment,
# and the filesystem location of missing license texts.  Returns True if any
# package was masked by an unsupported EAPI.
# NOTE(review): this is Python 2 (`print` statement).  The listing has gaps
# (1148 -> 1150, 1153 -> 1156, ...); `shown_cpvs` is referenced at 1153 but
# its initialization line is missing — presumably `shown_cpvs = set()` near
# 1145-1146; confirm against the original source.
1144 def show_masked_packages(masked_packages):
1145 shown_licenses = set()
1146 shown_comments = set()
1147 # Maybe there is both an ebuild and a binary. Only
1148 # show one of them to avoid redundant appearance.
1150 have_eapi_mask = False
1151 for (root_config, pkgsettings, cpv,
1152 metadata, mreasons) in masked_packages:
1153 if cpv in shown_cpvs:
1156 comment, filename = None, None
# Only look up the mask comment when the package.mask file is the reason.
1157 if "package.mask" in mreasons:
1158 comment, filename = \
1159 portage.getmaskingreason(
1160 cpv, metadata=metadata,
1161 settings=pkgsettings,
1162 portdb=root_config.trees["porttree"].dbapi,
1163 return_location=True)
1164 missing_licenses = []
1165 # (review) EAPI check guards the license lookup below.
1166 if not portage.eapi_is_supported(metadata["EAPI"]):
1167 have_eapi_mask = True
1169 missing_licenses = \
1170 pkgsettings._getMissingLicenses(
1172 except portage.exception.InvalidDependString:
1173 # This will have already been reported
1174 # above via mreasons.
1177 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
# Deduplicate mask comments and license notices across packages.
1178 if comment and comment not in shown_comments:
1181 shown_comments.add(comment)
1182 portdb = root_config.trees["porttree"].dbapi
1183 for l in missing_licenses:
1184 l_path = portdb.findLicensePath(l)
1185 if l in shown_licenses:
1187 msg = ("A copy of the '%s' license" + \
1188 " is located at '%s'.") % (l, l_path)
1191 shown_licenses.add(l)
1192 return have_eapi_mask
# A Task subclass representing a single package instance (ebuild, binary,
# or installed).  Uses __slots__ to keep per-instance memory low, since a
# depgraph can hold thousands of these.
# NOTE(review): the listing is missing lines throughout this class: the
# opening of the `metadata_keys` list (before 1205), the `class _use` header
# (before 1227), the token-classification loop of `_iuse.__init__`
# (1239-1244), the try: around 1257, the `_hash_key` assignment line (1276),
# and the return/else lines of the comparison methods — confirm each against
# the original source before relying on this text.
1194 class Package(Task):
1196 __hash__ = Task.__hash__
1197 __slots__ = ("built", "cpv", "depth",
1198 "installed", "metadata", "onlydeps", "operation",
1199 "root_config", "type_name",
1200 "category", "counter", "cp", "cpv_split",
1201 "inherited", "iuse", "mtime",
1202 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1205 "CHOST", "COUNTER", "DEPEND", "EAPI",
1206 "INHERITED", "IUSE", "KEYWORDS",
1207 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1208 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
# Derive convenience attributes (cp, slot_atom, category, pf, version split)
# from cpv; wrapping metadata keeps Package attributes in sync with it.
1210 def __init__(self, **kwargs):
1211 Task.__init__(self, **kwargs)
1212 self.root = self.root_config.root
1213 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1214 self.cp = portage.cpv_getkey(self.cpv)
1217 # Avoid an InvalidAtom exception when creating slot_atom.
1218 # This package instance will be masked due to empty SLOT.
1220 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1221 self.category, self.pf = portage.catsplit(self.cpv)
1222 self.cpv_split = portage.catpkgsplit(self.cpv)
1223 self.pv_split = self.cpv_split[1:]
# (review) This __slots__ belongs to a nested `class _use` whose header
# line is missing from the listing; it wraps the enabled-USE-flag set.
1227 __slots__ = ("__weakref__", "enabled")
1229 def __init__(self, use):
1230 self.enabled = frozenset(use)
# Nested helper modeling IUSE: enabled (+flag), disabled (-flag), and a
# lazily-built regex matching all known flags.
1232 class _iuse(object):
1234 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1236 def __init__(self, tokens, iuse_implicit):
1237 self.tokens = tuple(tokens)
1238 self.iuse_implicit = iuse_implicit
1245 enabled.append(x[1:])
1247 disabled.append(x[1:])
1250 self.enabled = frozenset(enabled)
1251 self.disabled = frozenset(disabled)
1252 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile self.regex on first access, then cache it on the instance.
# (review) `all` shadows the builtin here; left as-is to keep bytes identical.
1254 def __getattribute__(self, name):
1257 return object.__getattribute__(self, "regex")
1258 except AttributeError:
1259 all = object.__getattribute__(self, "all")
1260 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1261 # Escape anything except ".*" which is supposed
1262 # to pass through from _get_implicit_iuse()
1263 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1264 regex = "^(%s)$" % "|".join(regex)
1265 regex = regex.replace("\\.\\*", ".*")
1266 self.regex = re.compile(regex)
1267 return object.__getattribute__(self, name)
# Hash identity: (type_name, root, cpv, operation); operation defaults to
# "merge", downgraded to "nomerge" for onlydeps/installed packages.
1269 def _get_hash_key(self):
1270 hash_key = getattr(self, "_hash_key", None)
1271 if hash_key is None:
1272 if self.operation is None:
1273 self.operation = "merge"
1274 if self.onlydeps or self.installed:
1275 self.operation = "nomerge"
1277 (self.type_name, self.root, self.cpv, self.operation)
1278 return self._hash_key
# Rich comparisons order same-cp packages by version via portage.pkgcmp();
# (review) the return statements are among the lines missing from this listing.
1280 def __lt__(self, other):
1281 if other.cp != self.cp:
1283 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1287 def __le__(self, other):
1288 if other.cp != self.cp:
1290 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1294 def __gt__(self, other):
1295 if other.cp != self.cp:
1297 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1301 def __ge__(self, other):
1302 if other.cp != self.cp:
1304 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level setup: the full set of metadata keys a Package may carry —
# all portage auxdb keys except UNUSED_* placeholders and CDEPEND, plus
# Package.metadata_keys.  slot_dict_class() builds a memory-efficient,
# __slots__-based dict type restricted to exactly these keys.
1308 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1309 if not x.startswith("UNUSED_"))
1310 _all_metadata_keys.discard("CDEPEND")
1311 _all_metadata_keys.update(Package.metadata_keys)
1313 from portage.cache.mappings import slot_dict_class
1314 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Dict-like metadata container that mirrors writes of selected keys onto the
# owning Package as attributes, via per-key _set_<key>() hooks dispatched
# from __setitem__.
# NOTE(review): lines are missing from this listing — e.g. the
# `self._pkg = pkg` assignment (around 1327), the body of _set_slot (1345),
# and the string-to-long conversions in _set_counter/_set__mtime_
# (1352-1355, 1360+).  Confirm against the original source.
1316 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1318 Detect metadata updates and synchronize Package attributes.
1321 __slots__ = ("_pkg",)
# Keys whose writes trigger a _set_* hook below.
1322 _wrapped_keys = frozenset(
1323 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1325 def __init__(self, pkg, metadata):
1326 _PackageMetadataWrapperBase.__init__(self)
1328 self.update(metadata)
# Every store goes through the base dict, then fires the matching hook,
# e.g. "IUSE" -> self._set_iuse(k, v).
1330 def __setitem__(self, k, v):
1331 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1332 if k in self._wrapped_keys:
1333 getattr(self, "_set_" + k.lower())(k, v)
1335 def _set_inherited(self, k, v):
1336 if isinstance(v, basestring):
1337 v = frozenset(v.split())
1338 self._pkg.inherited = v
1340 def _set_iuse(self, k, v):
1341 self._pkg.iuse = self._pkg._iuse(
1342 v.split(), self._pkg.root_config.iuse_implicit)
1344 def _set_slot(self, k, v):
1347 def _set_use(self, k, v):
1348 self._pkg.use = self._pkg._use(v.split())
1350 def _set_counter(self, k, v):
1351 if isinstance(v, basestring):
1356 self._pkg.counter = v
1358 def _set__mtime_(self, k, v):
1359 if isinstance(v, basestring):
# Asynchronous task that unmerges a single installed package via unmerge(),
# translating an UninstallFailure into self.returncode.
# NOTE(review): the listing is missing the method header and try: that must
# precede the unmerge() call (gap 1368 -> 1372), and the else/log-routing
# lines of _writemsg_level (1391-1392, 1395, 1397+).
1366 class PackageUninstall(AsynchronousTask):
1368 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# clean_delay=0 / raise_on_error=1: fail fast and surface errors as
# UninstallFailure rather than printing and continuing.
1372 unmerge(self.pkg.root_config, self.opts, "unmerge",
1373 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
1374 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
1375 writemsg_level=self._writemsg_level)
1376 except UninstallFailure, e:
1377 self.returncode = e.status
1379 self.returncode = os.EX_OK
# Route unmerge output either to the terminal (respecting background mode
# and log level) or to PORTAGE_LOG_FILE when one is configured.
1382 def _writemsg_level(self, msg, level=0, noiselevel=0):
1384 log_path = self.settings.get("PORTAGE_LOG_FILE")
1385 background = self.background
1387 if log_path is None:
# In background mode, suppress messages below WARNING.
1388 if not (background and level < logging.WARNING):
1389 portage.util.writemsg_level(msg,
1390 level=level, noiselevel=noiselevel)
1393 portage.util.writemsg_level(msg,
1394 level=level, noiselevel=noiselevel)
# (review) file handle is opened for append; the write/close lines are
# missing from this listing.
1396 f = open(log_path, 'a')
# Composite task for one entry of the merge list: announces progress, then
# dispatches to EbuildBuild (source), Binpkg (binary), or — per the comment
# at 1422 — handles uninstalls via self.merge().
# NOTE(review): this listing is missing several method headers (_start,
# poll/wait wrappers, merge), so the boundaries between methods below are
# inferred from the embedded line-number gaps — confirm against the original.
1402 class MergeListItem(CompositeTask):
1405 TODO: For parallel scheduling, everything here needs asynchronous
1406 execution support (start, poll, and wait methods).
1409 __slots__ = ("args_set",
1410 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
1411 "find_blockers", "logger", "mtimedb", "pkg",
1412 "pkg_count", "pkg_to_replace", "prefetcher",
1413 "settings", "statusMessage", "world_atom") + \
1419 build_opts = self.build_opts
1422 # uninstall, executed by self.merge()
1423 self.returncode = os.EX_OK
# Cache frequently-used attributes in locals before dispatching.
1427 args_set = self.args_set
1428 find_blockers = self.find_blockers
1429 logger = self.logger
1430 mtimedb = self.mtimedb
1431 pkg_count = self.pkg_count
1432 scheduler = self.scheduler
1433 settings = self.settings
1434 world_atom = self.world_atom
1435 ldpath_mtimes = mtimedb["ldpath"]
# Build the "Emerging (n of m) cpv" progress line, adjusting the verb for
# binary packages and fetch-only mode.
1437 action_desc = "Emerging"
1439 if pkg.type_name == "binary":
1440 action_desc += " binary"
1442 if build_opts.fetchonly:
1443 action_desc = "Fetching"
1445 msg = "%s (%s of %s) %s" % \
1447 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
1448 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
1449 colorize("GOOD", pkg.cpv))
# Append the overlay/repo name when it differs from the main tree's repo.
1451 portdb = pkg.root_config.trees["porttree"].dbapi
1452 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
1453 if portdir_repo_name:
1454 pkg_repo_name = pkg.metadata.get("repository")
1455 if pkg_repo_name != portdir_repo_name:
1456 if not pkg_repo_name:
1457 pkg_repo_name = "unknown repo"
1458 msg += " from %s" % pkg_repo_name
1461 msg += " %s %s" % (preposition, pkg.root)
1463 if not build_opts.pretend:
1464 self.statusMessage(msg)
1465 logger.log(" >>> emerge (%s of %s) %s to %s" % \
1466 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch: source build for ebuilds ...
1468 if pkg.type_name == "ebuild":
1470 build = EbuildBuild(args_set=args_set,
1471 background=self.background,
1472 config_pool=self.config_pool,
1473 find_blockers=find_blockers,
1474 ldpath_mtimes=ldpath_mtimes, logger=logger,
1475 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
1476 prefetcher=self.prefetcher, scheduler=scheduler,
1477 settings=settings, world_atom=world_atom)
1479 self._install_task = build
1480 self._start_task(build, self._default_final_exit)
# ... or binary-package install.
1483 elif pkg.type_name == "binary":
1485 binpkg = Binpkg(background=self.background,
1486 find_blockers=find_blockers,
1487 ldpath_mtimes=ldpath_mtimes, logger=logger,
1488 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
1489 prefetcher=self.prefetcher, settings=settings,
1490 scheduler=scheduler, world_atom=world_atom)
1492 self._install_task = binpkg
1493 self._start_task(binpkg, self._default_final_exit)
# (review) poll/wait delegation to the inner install task; the surrounding
# method headers are missing from this listing.
1497 self._install_task.poll()
1498 return self.returncode
1501 self._install_task.wait()
1502 return self.returncode
# merge(): runs a blocking uninstall first (unless buildpkgonly/fetchonly/
# pretend), then delegates the install step to the dispatched task.
1507 build_opts = self.build_opts
1508 find_blockers = self.find_blockers
1509 logger = self.logger
1510 mtimedb = self.mtimedb
1511 pkg_count = self.pkg_count
1512 prefetcher = self.prefetcher
1513 scheduler = self.scheduler
1514 settings = self.settings
1515 world_atom = self.world_atom
1516 ldpath_mtimes = mtimedb["ldpath"]
1519 if not (build_opts.buildpkgonly or \
1520 build_opts.fetchonly or build_opts.pretend):
1522 uninstall = PackageUninstall(background=self.background,
1523 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
1524 pkg=pkg, scheduler=scheduler, settings=settings)
1527 retval = uninstall.wait()
1528 if retval != os.EX_OK:
1532 if build_opts.fetchonly or \
1533 build_opts.buildpkgonly:
1534 return self.returncode
1536 retval = self._install_task.install()
# Computes which installed packages block, or are blocked by, a new package,
# using a BlockerCache of parsed "!" atoms keyed by installed cpv and
# invalidated via the COUNTER metadata value.
# NOTE(review): this listing has gaps (missing try/finally around the
# dep_check calls, the `if not success:` branches, and some loop headers);
# boundaries below are inferred from the embedded numbering.
1539 class BlockerDB(object):
1541 def __init__(self, root_config):
1542 self._root_config = root_config
1543 self._vartree = root_config.trees["vartree"]
1544 self._portdb = root_config.trees["porttree"].dbapi
1546 self._dep_check_trees = None
1547 self._fake_vartree = None
# Lazily build (then keep in sync) a FakeVartree plus the trees mapping
# that dep_check() expects; both port/var slots point at the fake tree.
1549 def _get_fake_vartree(self, acquire_lock=0):
1550 fake_vartree = self._fake_vartree
1551 if fake_vartree is None:
1552 fake_vartree = FakeVartree(self._root_config,
1553 acquire_lock=acquire_lock)
1554 self._fake_vartree = fake_vartree
1555 self._dep_check_trees = { self._vartree.root : {
1556 "porttree" : fake_vartree,
1557 "vartree" : fake_vartree,
1560 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that have a blocker matching
# `new_pkg`, plus installed packages that `new_pkg` itself blocks.
1563 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
1564 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
1565 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1566 settings = self._vartree.settings
1567 stale_cache = set(blocker_cache)
1568 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
1569 dep_check_trees = self._dep_check_trees
1570 vardb = fake_vartree.dbapi
1571 installed_pkgs = list(vardb)
# Pass 1: ensure the blocker cache holds current "!" atoms for every
# installed package, re-parsing *DEPEND when the COUNTER changed.
1573 for inst_pkg in installed_pkgs:
1574 stale_cache.discard(inst_pkg.cpv)
1575 cached_blockers = blocker_cache.get(inst_pkg.cpv)
1576 if cached_blockers is not None and \
1577 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
1578 cached_blockers = None
1579 if cached_blockers is not None:
1580 blocker_atoms = cached_blockers.atoms
1582 # Use aux_get() to trigger FakeVartree global
1583 # updates on *DEPEND when appropriate.
1584 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Temporarily relax strict dep checking while parsing installed deps;
# restored right after the call.
1586 portage.dep._dep_check_strict = False
1587 success, atoms = portage.dep_check(depstr,
1588 vardb, settings, myuse=inst_pkg.use.enabled,
1589 trees=dep_check_trees, myroot=inst_pkg.root)
1591 portage.dep._dep_check_strict = True
1593 pkg_location = os.path.join(inst_pkg.root,
1594 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
1595 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
1596 (pkg_location, atoms), noiselevel=-1)
# Only "!" (blocker) atoms are cached, sorted for stable storage.
1599 blocker_atoms = [atom for atom in atoms \
1600 if atom.startswith("!")]
1601 blocker_atoms.sort()
1602 counter = long(inst_pkg.metadata["COUNTER"])
1603 blocker_cache[inst_pkg.cpv] = \
1604 blocker_cache.BlockerData(counter, blocker_atoms)
# Drop cache entries for packages no longer installed, then persist.
1605 for cpv in stale_cache:
1606 del blocker_cache[cpv]
1607 blocker_cache.flush()
# Pass 2: map each blocker atom back to the installed package(s) declaring
# it, then collect those whose atoms match new_pkg.
1609 blocker_parents = digraph()
1611 for pkg in installed_pkgs:
1612 for blocker_atom in blocker_cache[pkg.cpv].atoms:
1613 blocker_atom = blocker_atom.lstrip("!")
1614 blocker_atoms.append(blocker_atom)
1615 blocker_parents.add(blocker_atom, pkg)
1617 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1618 blocking_pkgs = set()
1619 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
1620 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
1622 # Check for blockers in the other direction.
1623 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
1625 portage.dep._dep_check_strict = False
1626 success, atoms = portage.dep_check(depstr,
1627 vardb, settings, myuse=new_pkg.use.enabled,
1628 trees=dep_check_trees, myroot=new_pkg.root)
1630 portage.dep._dep_check_strict = True
1632 # We should never get this far with invalid deps.
1633 show_invalid_depstring_notice(new_pkg, depstr, atoms)
1636 blocker_atoms = [atom.lstrip("!") for atom in atoms \
1639 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# StopIteration from .next() means "no match"; installed packages matched
# by new_pkg's blockers are added to the result.
1640 for inst_pkg in installed_pkgs:
1642 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
1643 except (portage.exception.InvalidDependString, StopIteration):
1645 blocking_pkgs.add(inst_pkg)
1647 return blocking_pkgs
# Report an invalid/corrupt dependency string for `parent_node` (a
# (type, root, key, status) tuple), with advice tailored to whether the
# package is already installed ("nomerge") or about to be installed.
# NOTE(review): the `msg = []` initialization line is missing from this
# listing (gap 1653 -> 1655); the else: before 1667 is likewise missing.
1649 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
1651 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
1652 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
1653 p_type, p_root, p_key, p_status = parent_node
# Installed package: point the user at its vdb *DEPEND files.
1655 if p_status == "nomerge":
1656 category, pf = portage.catsplit(p_key)
1657 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1658 msg.append("Portage is unable to process the dependencies of the ")
1659 msg.append("'%s' package. " % p_key)
1660 msg.append("In order to correct this problem, the package ")
1661 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1662 msg.append("As a temporary workaround, the --nodeps option can ")
1663 msg.append("be used to ignore all dependencies. For reference, ")
1664 msg.append("the problematic dependencies can be found in the ")
1665 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# Not-installed package: advise contacting the maintainer.
1667 msg.append("This package can not be installed. ")
1668 msg.append("Please notify the '%s' package maintainer " % p_key)
1669 msg.append("about this problem.")
# Wrap the advice to 72 columns and emit everything at ERROR level.
1671 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
1672 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
1674 class depgraph(object):
1676 pkg_tree_map = RootConfig.pkg_tree_map
1678 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# depgraph.__init__: set up per-root fake trees/dbs used for dependency
# resolution, then initialize the many bookkeeping structures of the graph.
# NOTE(review): this listing is missing lines throughout (e.g. the
# initializations of self.trees/self.roots/self.mydbapi before 1709-1744,
# the `for pkg in vardb:` loop header before 1732, the graph_tree creation
# before 1747, the `dbs = []` before 1784, and `self._sets = {}` before
# 1802); inferred structure should be confirmed against the original.
1680 def __init__(self, settings, trees, myopts, myparams, spinner):
1681 self.settings = settings
1682 self.target_root = settings["ROOT"]
1683 self.myopts = myopts
1684 self.myparams = myparams
1686 if settings.get("PORTAGE_DEBUG", "") == "1":
1688 self.spinner = spinner
1689 self._running_root = trees["/"]["root_config"]
1690 self._opts_no_restart = Scheduler._opts_no_restart
1691 self.pkgsettings = {}
1692 # Maps slot atom to package for each Package added to the graph.
1693 self._slot_pkg_map = {}
1694 # Maps nodes to the reasons they were selected for reinstallation.
1695 self._reinstall_nodes = {}
1698 self._trees_orig = trees
1700 # Contains a filtered view of preferred packages that are selected
1701 # from available repositories.
1702 self._filtered_trees = {}
1703 # Contains installed packages and new packages that have been added
1705 self._graph_trees = {}
1706 # All Package instances
1707 self._pkg_cache = {}
# Per-root setup: replace the real vartree with a FakeVartree so graph
# operations never mutate the live vdb.
1708 for myroot in trees:
1709 self.trees[myroot] = {}
1710 # Create a RootConfig instance that references
1711 # the FakeVartree instead of the real one.
1712 self.roots[myroot] = RootConfig(
1713 trees[myroot]["vartree"].settings,
1715 trees[myroot]["root_config"].setconfig)
1716 for tree in ("porttree", "bintree"):
1717 self.trees[myroot][tree] = trees[myroot][tree]
1718 self.trees[myroot]["vartree"] = \
1719 FakeVartree(trees[myroot]["root_config"],
1720 pkg_cache=self._pkg_cache)
1721 self.pkgsettings[myroot] = portage.config(
1722 clone=self.trees[myroot]["vartree"].settings)
1723 self._slot_pkg_map[myroot] = {}
1724 vardb = self.trees[myroot]["vartree"].dbapi
1725 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1726 "--buildpkgonly" not in self.myopts
1727 # This fakedbapi instance will model the state that the vdb will
1728 # have after new packages have been installed.
1729 fakedb = PackageVirtualDbapi(vardb.settings)
1730 if preload_installed_pkgs:
1732 self.spinner.update()
1733 # This triggers metadata updates via FakeVartree.
1734 vardb.aux_get(pkg.cpv, [])
1735 fakedb.cpv_inject(pkg)
1737 # Now that the vardb state is cached in our FakeVartree,
1738 # we won't be needing the real vartree cache for awhile.
1739 # To make some room on the heap, clear the vardbapi
1741 trees[myroot]["vartree"].dbapi._clear_cache()
1744 self.mydbapi[myroot] = fakedb
1747 graph_tree.dbapi = fakedb
1748 self._graph_trees[myroot] = {}
1749 self._filtered_trees[myroot] = {}
1750 # Substitute the graph tree for the vartree in dep_check() since we
1751 # want atom selections to be consistent with package selections
1752 # have already been made.
1753 self._graph_trees[myroot]["porttree"] = graph_tree
1754 self._graph_trees[myroot]["vartree"] = graph_tree
1755 def filtered_tree():
1757 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1758 self._filtered_trees[myroot]["porttree"] = filtered_tree
1760 # Passing in graph_tree as the vartree here could lead to better
1761 # atom selections in some cases by causing atoms for packages that
1762 # have been added to the graph to be preferred over other choices.
1763 # However, it can trigger atom selections that result in
1764 # unresolvable direct circular dependencies. For example, this
1765 # happens with gwydion-dylan which depends on either itself or
1766 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1767 # gwydion-dylan-bin needs to be selected in order to avoid a
1768 # an unresolvable direct circular dependency.
1770 # To solve the problem described above, pass in "graph_db" so that
1771 # packages that have been added to the graph are distinguishable
1772 # from other available packages and installed packages. Also, pass
1773 # the parent package into self._select_atoms() calls so that
1774 # unresolvable direct circular dependencies can be detected and
1775 # avoided when possible.
1776 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1777 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Build the ordered (db, pkg_type, built, installed, db_keys) candidate
# list; ebuilds first unless --usepkgonly, binaries only with --usepkg.
1780 portdb = self.trees[myroot]["porttree"].dbapi
1781 bindb = self.trees[myroot]["bintree"].dbapi
1782 vardb = self.trees[myroot]["vartree"].dbapi
1783 # (db, pkg_type, built, installed, db_keys)
1784 if "--usepkgonly" not in self.myopts:
1785 db_keys = list(portdb._aux_cache_keys)
1786 dbs.append((portdb, "ebuild", False, False, db_keys))
1787 if "--usepkg" in self.myopts:
1788 db_keys = list(bindb._aux_cache_keys)
1789 dbs.append((bindb, "binary", True, False, db_keys))
1790 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
1791 dbs.append((vardb, "installed", True, True, db_keys))
1792 self._filtered_trees[myroot]["dbs"] = dbs
1793 if "--usepkg" in self.myopts:
1794 self.trees[myroot]["bintree"].populate(
1795 "--getbinpkg" in self.myopts,
1796 "--getbinpkgonly" in self.myopts)
# Graph-wide bookkeeping state.
1799 self.digraph=portage.digraph()
1800 # contains all sets added to the graph
1802 # contains atoms given as arguments
1803 self._sets["args"] = InternalPackageSet()
1804 # contains all atoms from all sets added to the graph, including
1805 # atoms given as arguments
1806 self._set_atoms = InternalPackageSet()
1807 self._atom_arg_map = {}
1808 # contains all nodes pulled in by self._set_atoms
1809 self._set_nodes = set()
1810 # Contains only Blocker -> Uninstall edges
1811 self._blocker_uninstalls = digraph()
1812 # Contains only Package -> Blocker edges
1813 self._blocker_parents = digraph()
1814 # Contains only irrelevant Package -> Blocker edges
1815 self._irrelevant_blockers = digraph()
1816 # Contains only unsolvable Package -> Blocker edges
1817 self._unsolvable_blockers = digraph()
1818 # Contains all Blocker -> Blocked Package edges
1819 self._blocked_pkgs = digraph()
1820 # Contains world packages that have been protected from
1821 # uninstallation but may not have been added to the graph
1822 # if the graph is not complete yet.
1823 self._blocked_world_pkgs = {}
1824 self._slot_collision_info = {}
1825 # Slot collision nodes are not allowed to block other packages since
1826 # blocker validation is only able to account for one package per slot.
1827 self._slot_collision_nodes = set()
1828 self._parent_atoms = {}
1829 self._slot_conflict_parent_atoms = set()
1830 self._serialized_tasks_cache = None
1831 self._scheduler_graph = None
1832 self._displayed_list = None
1833 self._pprovided_args = []
1834 self._missing_args = []
1835 self._masked_installed = set()
1836 self._unsatisfied_deps_for_display = []
1837 self._unsatisfied_blockers_for_display = None
1838 self._circular_deps_for_display = None
1839 self._dep_stack = []
1840 self._dep_disjunctive_stack = []
1841 self._unsatisfied_deps = []
1842 self._initially_unsatisfied_deps = []
1843 self._ignored_deps = []
1844 self._required_set_names = set(["system", "world"])
# Strategy hooks: both selection policies default to "highest available".
1845 self._select_atoms = self._select_atoms_highest_available
1846 self._select_package = self._select_pkg_highest_available
1847 self._highest_pkg_cache = {}
# Print a detailed slot-conflict report: for each conflicting slot, the
# colliding package instances and a pruned list of the parents that pulled
# each one in, followed by a per-conflict explanation when one can be
# generated.
# NOTE(review): the listing is missing lines (e.g. `msg = []`, `indent`,
# `max_parents`, `pruned_list = set()`, the `explanations` counter, and
# several else:/continue lines); inferred structure should be confirmed.
1849 def _show_slot_collision_notice(self):
1850 """Show an informational message advising the user to mask one of the
1851 the packages. In some cases it may be possible to resolve this
1852 automatically, but support for backtracking (removal nodes that have
1853 already been selected) will be required in order to handle all possible
1857 if not self._slot_collision_info:
1860 self._show_merge_list()
1863 msg.append("\n!!! Multiple package instances within a single " + \
1864 "package slot have been pulled\n")
1865 msg.append("!!! into the dependency graph, resulting" + \
1866 " in a slot conflict:\n\n")
1868 # Max number of parents shown, to avoid flooding the display.
1870 explanation_columns = 70
1872 for (slot_atom, root), slot_nodes \
1873 in self._slot_collision_info.iteritems():
1874 msg.append(str(slot_atom))
1877 for node in slot_nodes:
1879 msg.append(str(node))
1880 parent_atoms = self._parent_atoms.get(node)
# Pruning passes, in priority order: conflict atoms, then DependencyArg
# parents, then parents themselves in collision slots, then the rest.
1883 # Prefer conflict atoms over others.
1884 for parent_atom in parent_atoms:
1885 if len(pruned_list) >= max_parents:
1887 if parent_atom in self._slot_conflict_parent_atoms:
1888 pruned_list.add(parent_atom)
1890 # If this package was pulled in by conflict atoms then
1891 # show those alone since those are the most interesting.
1893 # When generating the pruned list, prefer instances
1894 # of DependencyArg over instances of Package.
1895 for parent_atom in parent_atoms:
1896 if len(pruned_list) >= max_parents:
1898 parent, atom = parent_atom
1899 if isinstance(parent, DependencyArg):
1900 pruned_list.add(parent_atom)
1901 # Prefer Packages instances that themselves have been
1902 # pulled into collision slots.
1903 for parent_atom in parent_atoms:
1904 if len(pruned_list) >= max_parents:
1906 parent, atom = parent_atom
1907 if isinstance(parent, Package) and \
1908 (parent.slot_atom, parent.root) \
1909 in self._slot_collision_info:
1910 pruned_list.add(parent_atom)
1911 for parent_atom in parent_atoms:
1912 if len(pruned_list) >= max_parents:
1914 pruned_list.add(parent_atom)
1915 omitted_parents = len(parent_atoms) - len(pruned_list)
1916 parent_atoms = pruned_list
1917 msg.append(" pulled in by\n")
1918 for parent_atom in parent_atoms:
1919 parent, atom = parent_atom
1920 msg.append(2*indent)
1921 if isinstance(parent,
1922 (PackageArg, AtomArg)):
1923 # For PackageArg and AtomArg types, it's
1924 # redundant to display the atom attribute.
1925 msg.append(str(parent))
1927 # Display the specific atom from SetArg or
1929 msg.append("%s required by %s" % (atom, parent))
1932 msg.append(2*indent)
1933 msg.append("(and %d more)\n" % omitted_parents)
1935 msg.append(" (no parents)\n")
1937 explanation = self._slot_conflict_explanation(slot_nodes)
1940 msg.append(indent + "Explanation:\n\n")
1941 for line in textwrap.wrap(explanation, explanation_columns):
1942 msg.append(2*indent + line + "\n")
1945 sys.stderr.write("".join(msg))
# Generic package.mask advice is only shown when some conflict lacked a
# specific explanation (and --quiet is not set).
1948 explanations_for_all = explanations == len(self._slot_collision_info)
1950 if explanations_for_all or "--quiet" in self.myopts:
1954 msg.append("It may be possible to solve this problem ")
1955 msg.append("by using package.mask to prevent one of ")
1956 msg.append("those packages from being selected. ")
1957 msg.append("However, it is also possible that conflicting ")
1958 msg.append("dependencies exist such that they are impossible to ")
1959 msg.append("satisfy simultaneously. If such a conflict exists in ")
1960 msg.append("the dependencies of two different packages, then those ")
1961 msg.append("packages can not be installed simultaneously.")
# formatter is a Python 2 stdlib module (removed in Python 3.10).
1963 from formatter import AbstractFormatter, DumbWriter
1964 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
1966 f.add_flowing_data(x)
1970 msg.append("For more information, see MASKED PACKAGES ")
1971 msg.append("section in the emerge man page or refer ")
1972 msg.append("to the Gentoo Handbook.")
1974 f.add_flowing_data(x)
# Generate a human-readable explanation string for a two-package slot
# conflict caused by USE deps, or (per the docstring) nothing when the case
# is too complex.  Returns the explanation for cases 1 and 2 described below.
# NOTE(review): missing lines include the `matched_node = None` init
# (referenced at 2023/2028), several return statements, and the USE-dep
# check at 2018-2022; confirm against the original source.
1978 def _slot_conflict_explanation(self, slot_nodes):
1980 When a slot conflict occurs due to USE deps, there are a few
1981 different cases to consider:
1983 1) New USE are correctly set but --newuse wasn't requested so an
1984 installed package with incorrect USE happened to get pulled
1985 into graph before the new one.
1987 2) New USE are incorrectly set but an installed package has correct
1988 USE so it got pulled into the graph, and a new instance also got
1989 pulled in due to --newuse or an upgrade.
1991 3) Multiple USE deps exist that can't be satisfied simultaneously,
1992 and multiple package instances got pulled into the same slot to
1993 satisfy the conflicting deps.
1995 Currently, explanations and suggested courses of action are generated
1996 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
1999 if len(slot_nodes) != 2:
2000 # Suggestions are only implemented for
2001 # conflicts between two packages.
2004 all_conflict_atoms = self._slot_conflict_parent_atoms
2006 matched_atoms = None
2007 unmatched_node = None
# Classify each node as matched (its parents include conflict atoms) or
# unmatched; ambiguity in either direction aborts the suggestion.
2008 for node in slot_nodes:
2009 parent_atoms = self._parent_atoms.get(node)
2010 if not parent_atoms:
2011 # Normally, there are always parent atoms. If there are
2012 # none then something unexpected is happening and there's
2013 # currently no suggestion for this case.
2015 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
2016 for parent_atom in conflict_atoms:
2017 parent, atom = parent_atom
2019 # Suggestions are currently only implemented for cases
2020 # in which all conflict atoms have USE deps.
2023 if matched_node is not None:
2024 # If conflict atoms match multiple nodes
2025 # then there's no suggestion.
2028 matched_atoms = conflict_atoms
2030 if unmatched_node is not None:
2031 # Neither node is matched by conflict atoms, and
2032 # there is no suggestion for this case.
2034 unmatched_node = node
2036 if matched_node is None or unmatched_node is None:
2037 # This shouldn't happen.
# Case 1: installed copy has stale USE and same version -> suggest --newuse.
2040 if unmatched_node.installed and not matched_node.installed and \
2041 unmatched_node.cpv == matched_node.cpv:
2042 # If the conflicting packages are the same version then
2043 # --newuse should be all that's needed. If they are different
2044 # versions then there's some other problem.
2045 return "New USE are correctly set, but --newuse wasn't" + \
2046 " requested, so an installed package with incorrect USE " + \
2047 "happened to get pulled into the dependency graph. " + \
2048 "In order to solve " + \
2049 "this, either specify the --newuse option or explicitly " + \
2050 " reinstall '%s'." % matched_node.slot_atom
# Case 2: new copy has wrong USE -> list the unsatisfied atoms to adjust.
2052 if matched_node.installed and not unmatched_node.installed:
2053 atoms = sorted(set(atom for parent, atom in matched_atoms))
2054 explanation = ("New USE for '%s' are incorrectly set. " + \
2055 "In order to solve this, adjust USE to satisfy '%s'") % \
2056 (matched_node.slot_atom, atoms[0])
2058 for atom in atoms[1:-1]:
2059 explanation += ", '%s'" % (atom,)
2062 explanation += " and '%s'" % (atoms[-1],)
# For each colliding slot, test every parent atom of the slot's packages
# against every package in the slot; atoms that match only a subset are the
# actual conflict atoms and get recorded in _slot_conflict_parent_atoms.
# NOTE(review): gaps in the numbering (2080 -> 2082, 2090 -> 2092,
# 2098 -> 2100) hide continue/else lines; confirm against the original.
2068 def _process_slot_conflicts(self):
2070 Process slot conflict data to identify specific atoms which
2071 lead to conflict. These atoms only match a subset of the
2072 packages that have been pulled into a given slot.
2074 for (slot_atom, root), slot_nodes \
2075 in self._slot_collision_info.iteritems():
# Union of all parent atoms across the colliding packages.
2077 all_parent_atoms = set()
2078 for pkg in slot_nodes:
2079 parent_atoms = self._parent_atoms.get(pkg)
2080 if not parent_atoms:
2082 all_parent_atoms.update(parent_atoms)
# Re-test each union atom against each package; atoms not already
# attributed to a package are matched individually.
2084 for pkg in slot_nodes:
2085 parent_atoms = self._parent_atoms.get(pkg)
2086 if parent_atoms is None:
2087 parent_atoms = set()
2088 self._parent_atoms[pkg] = parent_atoms
2089 for parent_atom in all_parent_atoms:
2090 if parent_atom in parent_atoms:
2092 # Use package set for matching since it will match via
2093 # PROVIDE when necessary, while match_from_list does not.
2094 parent, atom = parent_atom
2095 atom_set = InternalPackageSet(
2096 initial_atoms=(atom,))
2097 if atom_set.findAtomForPackage(pkg):
2098 parent_atoms.add(parent_atom)
2100 self._slot_conflict_parent_atoms.add(parent_atom)
# Compare old vs. new USE/IUSE to decide whether changed flags warrant a
# reinstall.  --newuse also counts IUSE membership changes (minus forced
# flags); --reinstall=changed-use only counts effectively-enabled changes.
# NOTE(review): the `if flags: return flags` / final `return None` lines
# fall in the numbering gaps (2111-2112, 2116+) and are missing here.
2102 def _reinstall_for_flags(self, forced_flags,
2103 orig_use, orig_iuse, cur_use, cur_iuse):
2104 """Return a set of flags that trigger reinstallation, or None if there
2105 are no such flags."""
2106 if "--newuse" in self.myopts:
2107 flags = set(orig_iuse.symmetric_difference(
2108 cur_iuse).difference(forced_flags))
2109 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2110 cur_iuse.intersection(cur_use)))
2113 elif "changed-use" == self.myopts.get("--reinstall"):
2114 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2115 cur_iuse.intersection(cur_use))
# Drain the dependency work stacks, dispatching Package entries to
# _add_pkg_deps() and other entries to _add_dep(); disjunctive deps are
# popped via _pop_disjunction() when the main stack is empty.
# NOTE(review): the `return 0`/`return 1` lines implied by the failure
# branches (2130-2131, 2133, 2136+) are missing from this listing.
2120 def _create_graph(self, allow_unsatisfied=False):
2121 dep_stack = self._dep_stack
2122 dep_disjunctive_stack = self._dep_disjunctive_stack
2123 while dep_stack or dep_disjunctive_stack:
2124 self.spinner.update()
2126 dep = dep_stack.pop()
2127 if isinstance(dep, Package):
2128 if not self._add_pkg_deps(dep,
2129 allow_unsatisfied=allow_unsatisfied):
2132 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2134 if dep_disjunctive_stack:
2135 if not self._pop_disjunction(allow_unsatisfied):
# Resolve a single Dependency: register blockers, select a matching
# package, handle unsatisfied atoms, and discard deps that need no further
# processing before recursing via _add_pkg().
# NOTE(review): gaps hide the blocker-atom test (2148), several
# return statements, and the try: headers around 2184-2187; confirm
# against the original source.
2139 def _add_dep(self, dep, allow_unsatisfied=False):
2140 debug = "--debug" in self.myopts
2141 buildpkgonly = "--buildpkgonly" in self.myopts
2142 nodeps = "--nodeps" in self.myopts
2143 empty = "empty" in self.myparams
2144 deep = "deep" in self.myparams
2145 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling: record a Package -> Blocker edge unless the parent is
# an --onlydeps node or a slot-collision node.
2147 if not buildpkgonly and \
2149 dep.parent not in self._slot_collision_nodes:
2150 if dep.parent.onlydeps:
2151 # It's safe to ignore blockers if the
2152 # parent is an --onlydeps node.
2154 # The blocker applies to the root where
2155 # the parent is or will be installed.
2156 blocker = Blocker(atom=dep.atom,
2157 eapi=dep.parent.metadata["EAPI"],
2158 root=dep.parent.root)
2159 self._blocker_parents.add(blocker, dep.parent)
2161 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2162 onlydeps=dep.onlydeps)
# No match: optional deps are dropped silently; otherwise queue for
# later retry (allow_unsatisfied) or for the failure display.
2164 if dep.priority.optional:
2165 # This could be an unecessary build-time dep
2166 # pulled in by --with-bdeps=y.
2168 if allow_unsatisfied:
2169 self._unsatisfied_deps.append(dep)
2171 self._unsatisfied_deps_for_display.append(
2172 ((dep.root, dep.atom), {"myparent":dep.parent}))
2174 # In some cases, dep_check will return deps that shouldn't
2175 # be proccessed any further, so they are identified and
2176 # discarded here. Try to discard as few as possible since
2177 # discarded dependencies reduce the amount of information
2178 # available for optimization of merge order.
2179 if dep.priority.satisfied and \
2180 not dep_pkg.installed and \
2181 not (existing_node or empty or deep or update):
2183 if dep.root == self.target_root:
2185 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2186 except StopIteration:
2188 except portage.exception.InvalidDependString:
2189 if not dep_pkg.installed:
2190 # This shouldn't happen since the package
2191 # should have been masked.
2194 self._ignored_deps.append(dep)
# Recurse: add the selected package (and, transitively, its deps).
2197 if not self._add_pkg(dep_pkg, dep):
# Add a selected package node to the dependency graph, handling slot
# collisions, old-style virtual (PROVIDE) registration, argument-atom
# parent edges, and the decision of whether to recurse into the
# package's own dependencies.
# NOTE(review): excerpt is line-sampled -- the docstring quotes around
# lines 2214-2218, several `try:` lines matching the visible `except`
# clauses, `if existing_node:` before 2260, `else:` branches, and the
# `return 0/1` statements appear to be missing. Treat the structure
# below as indicative, not complete.
2201 def _add_pkg(self, pkg, dep):
2208 myparent = dep.parent
2209 priority = dep.priority
2211 if priority is None:
2212 priority = DepPriority()
2214 Fills the digraph with nodes comprised of packages to merge.
2215 mybigkey is the package spec of the package to merge.
2216 myparent is the package depending on mybigkey ( or None )
2217 addme = Should we add this package to the digraph or are we just looking at it's deps?
2218 Think --onlydeps, we need to ignore packages in that case.
2221 #IUSE-aware emerge -> USE DEP aware depgraph
2222 #"no downgrade" emerge
2224 # Ensure that the dependencies of the same package
2225 # are never processed more than once.
2226 previously_added = pkg in self.digraph
2228 # select the correct /var database that we'll be checking against
2229 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2230 pkgsettings = self.pkgsettings[pkg.root]
# Which command-line arguments (if any) pulled this package in.
2235 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2236 except portage.exception.InvalidDependString, e:
2237 if not pkg.installed:
2238 show_invalid_depstring_notice(
2239 pkg, pkg.metadata["PROVIDE"], str(e))
2243 if not pkg.onlydeps:
2244 if not pkg.installed and \
2245 "empty" not in self.myparams and \
2246 vardbapi.match(pkg.slot_atom):
2247 # Increase the priority of dependencies on packages that
2248 # are being rebuilt. This optimizes merge order so that
2249 # dependencies are rebuilt/updated as soon as possible,
2250 # which is needed especially when emerge is called by
2251 # revdep-rebuild since dependencies may be affected by ABI
2252 # breakage that has rendered them useless. Don't adjust
2253 # priority here when in "empty" mode since all packages
2254 # are being merged in that case.
2255 priority.rebuild = True
# Check for an existing node occupying the same slot.
2257 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2258 slot_collision = False
2260 existing_node_matches = pkg.cpv == existing_node.cpv
2261 if existing_node_matches and \
2262 pkg != existing_node and \
2263 dep.atom is not None:
2264 # Use package set for matching since it will match via
2265 # PROVIDE when necessary, while match_from_list does not.
2266 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
2267 if not atom_set.findAtomForPackage(existing_node):
2268 existing_node_matches = False
2269 if existing_node_matches:
2270 # The existing node can be reused.
2272 for parent_atom in arg_atoms:
2273 parent, atom = parent_atom
2274 self.digraph.add(existing_node, parent,
2276 self._add_parent_atom(existing_node, parent_atom)
2277 # If a direct circular dependency is not an unsatisfied
2278 # buildtime dependency then drop it here since otherwise
2279 # it can skew the merge order calculation in an unwanted
2281 if existing_node != myparent or \
2282 (priority.buildtime and not priority.satisfied):
2283 self.digraph.addnode(existing_node, myparent,
2285 if dep.atom is not None and dep.parent is not None:
2286 self._add_parent_atom(existing_node,
2287 (dep.parent, dep.atom))
2291 # A slot collision has occurred. Sometimes this coincides
2292 # with unresolvable blockers, so the slot collision will be
2293 # shown later if there are no unresolvable blockers.
2294 self._add_slot_conflict(pkg)
2295 slot_collision = True
2298 # Now add this node to the graph so that self.display()
2299 # can show use flags and --tree portage.output. This node is
2300 # only being partially added to the graph. It must not be
2301 # allowed to interfere with the other nodes that have been
2302 # added. Do not overwrite data for existing nodes in
2303 # self.mydbapi since that data will be used for blocker
2305 # Even though the graph is now invalid, continue to process
2306 # dependencies so that things like --fetchonly can still
2307 # function despite collisions.
2309 elif not previously_added:
# First time: reserve the slot and register the package in the
# fake dbapi used for later matching, then drop stale caches.
2310 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2311 self.mydbapi[pkg.root].cpv_inject(pkg)
2312 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
2314 if not pkg.installed:
2315 # Allow this package to satisfy old-style virtuals in case it
2316 # doesn't already. Any pre-existing providers will be preferred
2319 pkgsettings.setinst(pkg.cpv, pkg.metadata)
2320 # For consistency, also update the global virtuals.
2321 settings = self.roots[pkg.root].settings
2323 settings.setinst(pkg.cpv, pkg.metadata)
2325 except portage.exception.InvalidDependString, e:
2326 show_invalid_depstring_notice(
2327 pkg, pkg.metadata["PROVIDE"], str(e))
2332 self._set_nodes.add(pkg)
2334 # Do this even when addme is False (--onlydeps) so that the
2335 # parent/child relationship is always known in case
2336 # self._show_slot_collision_notice() needs to be called later.
2337 self.digraph.add(pkg, myparent, priority=priority)
2338 if dep.atom is not None and dep.parent is not None:
2339 self._add_parent_atom(pkg, (dep.parent, dep.atom))
2342 for parent_atom in arg_atoms:
2343 parent, atom = parent_atom
2344 self.digraph.add(pkg, parent, priority=priority)
2345 self._add_parent_atom(pkg, parent_atom)
2347 """ This section determines whether we go deeper into dependencies or not.
2348 We want to go deeper on a few occasions:
2349 Installing package A, we need to make sure package A's deps are met.
2350 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2351 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
2353 dep_stack = self._dep_stack
2354 if "recurse" not in self.myparams:
2356 elif pkg.installed and \
2357 "deep" not in self.myparams:
# Installed packages only get their deps re-checked under --deep;
# otherwise they are parked on the ignored-deps list.
2358 dep_stack = self._ignored_deps
2360 self.spinner.update()
2365 if not previously_added:
2366 dep_stack.append(pkg)
2369 def _add_parent_atom(self, pkg, parent_atom):
2370 parent_atoms = self._parent_atoms.get(pkg)
2371 if parent_atoms is None:
2372 parent_atoms = set()
2373 self._parent_atoms[pkg] = parent_atoms
2374 parent_atoms.add(parent_atom)
# Record a slot conflict: pkg collides with the package already
# occupying its slot in self._slot_pkg_map. Both the new package and
# the pre-existing occupant are tracked in self._slot_collision_info,
# keyed by (slot_atom, root), for later display.
# NOTE(review): this excerpt drops at least one line between 2380 and
# 2382 -- as shown, slot_nodes is None inside the `if` branch, so
# slot_nodes.add(...) would raise AttributeError. The missing line is
# presumably `slot_nodes = set()` (and a trailing `slot_nodes.add(pkg)`
# after 2383 also appears to be missing). Confirm against the full file.
2376 def _add_slot_conflict(self, pkg):
2377 self._slot_collision_nodes.add(pkg)
2378 slot_key = (pkg.slot_atom, pkg.root)
2379 slot_nodes = self._slot_collision_info.get(slot_key)
2380 if slot_nodes is None:
2382 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
2383 self._slot_collision_info[slot_key] = slot_nodes
# Expand a package's DEPEND/RDEPEND/PDEPEND strings into Dependency
# objects and feed them through _add_pkg_dep_string. Handles the
# --with-bdeps, --buildpkgonly, --root-deps and removal-mode policies
# that decide which dep classes apply and with what priority.
# NOTE(review): excerpt is line-sampled -- e.g. the `edepend = {}` /
# `for k in depkeys:` around 2400, the `try:`/`finally:` pairing around
# 2447/2505, bdeps_root setup before 2436, and various `return`
# statements are not visible. Verify against the full file.
2386 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
2388 mytype = pkg.type_name
2391 metadata = pkg.metadata
2392 myuse = pkg.use.enabled
2394 depth = pkg.depth + 1
2395 removal_action = "remove" in self.myparams
2398 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
2400 edepend[k] = metadata[k]
2402 if not pkg.built and \
2403 "--buildpkgonly" in self.myopts and \
2404 "deep" not in self.myparams and \
2405 "empty" not in self.myparams:
# --buildpkgonly without deep/empty: only build-time deps matter.
2406 edepend["RDEPEND"] = ""
2407 edepend["PDEPEND"] = ""
2408 bdeps_optional = False
2410 if pkg.built and not removal_action:
2411 if self.myopts.get("--with-bdeps", "n") == "y":
2412 # Pull in build time deps as requested, but marked them as
2413 # "optional" since they are not strictly required. This allows
2414 # more freedom in the merge order calculation for solving
2415 # circular dependencies. Don't convert to PDEPEND since that
2416 # could make --with-bdeps=y less effective if it is used to
2417 # adjust merge order to prevent built_with_use() calls from
2419 bdeps_optional = True
2421 # built packages do not have build time dependencies.
2422 edepend["DEPEND"] = ""
2424 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
2425 edepend["DEPEND"] = ""
2428 root_deps = self.myopts.get("--root-deps")
2429 if root_deps is not None:
2430 if root_deps is True:
2432 elif root_deps == "rdeps":
2433 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples; DEPEND resolves against
# bdeps_root, runtime deps against the package's own root.
2436 (bdeps_root, edepend["DEPEND"],
2437 self._priority(buildtime=(not bdeps_optional),
2438 optional=bdeps_optional)),
2439 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
2440 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
2443 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
2444 strict = mytype != "installed"
2447 portage.dep._dep_check_strict = False
2449 for dep_root, dep_string, dep_priority in deps:
2454 print "Parent: ", jbigkey
2455 print "Depstring:", dep_string
2456 print "Priority:", dep_priority
# Normalize the dep string with the package's USE flags applied,
# then split out disjunctive (|| / virtual) parts for deferral.
2460 dep_string = portage.dep.paren_normalize(
2461 portage.dep.use_reduce(
2462 portage.dep.paren_reduce(dep_string),
2463 uselist=pkg.use.enabled))
2465 dep_string = list(self._queue_disjunctive_deps(
2466 pkg, dep_root, dep_priority, dep_string))
2468 except portage.exception.InvalidDependString, e:
2472 show_invalid_depstring_notice(pkg, dep_string, str(e))
2478 dep_string = portage.dep.paren_enclose(dep_string)
2480 if not self._add_pkg_dep_string(
2481 pkg, dep_root, dep_priority, dep_string,
2485 except portage.exception.AmbiguousPackageName, e:
2487 portage.writemsg("\n\n!!! An atom in the dependencies " + \
2488 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2490 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
2491 portage.writemsg("\n", noiselevel=-1)
2492 if mytype == "binary":
2494 "!!! This binary package cannot be installed: '%s'\n" % \
2495 mykey, noiselevel=-1)
2496 elif mytype == "ebuild":
2497 portdb = self.roots[myroot].trees["porttree"].dbapi
2498 myebuild, mylocation = portdb.findname2(mykey)
2499 portage.writemsg("!!! This ebuild cannot be installed: " + \
2500 "'%s'\n" % myebuild, noiselevel=-1)
2501 portage.writemsg("!!! Please notify the package maintainer " + \
2502 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Restore strict dep checking (presumably a `finally:` pairs with
# the assignment at 2447 -- TODO confirm).
2505 portage.dep._dep_check_strict = True
# Run dep_check on a single (root, dep-string, priority) group and add
# each selected atom as a Dependency via _add_dep. Atoms already
# satisfied by an installed package are marked priority.satisfied so
# they can be soft-dropped during merge-order calculation.
# NOTE(review): excerpt is line-sampled -- the signature's trailing
# parameter (presumably allow_unsatisfied) at 2509, the `try:` lines
# for the visible excepts, and the `return 0/1` statements are not
# visible. Verify against the full file.
2508 def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
2510 depth = pkg.depth + 1
2511 debug = "--debug" in self.myopts
2512 strict = pkg.type_name != "installed"
2516 print "Parent: ", pkg
2517 print "Depstring:", dep_string
2518 print "Priority:", dep_priority
2521 selected_atoms = self._select_atoms(dep_root,
2522 dep_string, myuse=pkg.use.enabled, parent=pkg,
2523 strict=strict, priority=dep_priority)
2524 except portage.exception.InvalidDependString, e:
2525 show_invalid_depstring_notice(pkg, dep_string, str(e))
2532 print "Candidates:", selected_atoms
2534 vardb = self.roots[dep_root].trees["vartree"].dbapi
2536 for atom in selected_atoms:
2539 atom = portage.dep.Atom(atom)
# Each child dep gets its own priority copy so that per-atom
# "satisfied" flags don't leak between siblings.
2541 mypriority = dep_priority.copy()
2542 if not atom.blocker and vardb.match(atom):
2543 mypriority.satisfied = True
2545 if not self._add_dep(Dependency(atom=atom,
2546 blocker=atom.blocker, depth=depth, parent=pkg,
2547 priority=mypriority, root=dep_root),
2548 allow_unsatisfied=allow_unsatisfied):
2551 except portage.exception.InvalidAtom, e:
2552 show_invalid_depstring_notice(
2553 pkg, dep_string, str(e))
2555 if not pkg.installed:
2559 print "Exiting...", pkg
# Walk a parsed (nested-list) dep structure, deferring disjunctive
# parts -- "||" groups and old-style virtual/ atoms -- onto
# self._dep_disjunctive_stack via _queue_disjunction, while yielding
# the plain, non-disjunctive deps for immediate processing.
# NOTE(review): excerpt is line-sampled -- the docstring quotes, the
# `i = 0` init and `i += 1`/`i += 2` index advances, the `x =
# dep_struct[i]` fetch, the `if x == "||":` test before 2577, the
# `try:` for the InvalidAtom handler, and the terminal `yield`
# statements are not visible. Verify against the full file.
2563 def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2565 Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
2566 Yields non-disjunctive deps. Raises InvalidDependString when
2570 while i < len(dep_struct):
2572 if isinstance(x, list):
# Recurse into nested groups, re-yielding their plain deps.
2573 for y in self._queue_disjunctive_deps(
2574 pkg, dep_root, dep_priority, x):
# "||" operator: defer the operator together with its choice list.
2577 self._queue_disjunction(pkg, dep_root, dep_priority,
2578 [ x, dep_struct[ i + 1 ] ] )
2582 x = portage.dep.Atom(x)
2583 except portage.exception.InvalidAtom:
# Lenient for installed packages; otherwise invalid atoms are fatal.
2584 if not pkg.installed:
2585 raise portage.exception.InvalidDependString(
2586 "invalid atom: '%s'" % x)
2588 # Note: Eventually this will check for PROPERTIES=virtual
2589 # or whatever other metadata gets implemented for this
2591 if x.cp.startswith('virtual/'):
2592 self._queue_disjunction( pkg, dep_root,
2593 dep_priority, [ str(x) ] )
2598 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2599 self._dep_disjunctive_stack.append(
2600 (pkg, dep_root, dep_priority, dep_struct))
# Pop one deferred disjunctive dep group and resolve it now by
# re-serializing it into a dep string and pushing the results through
# _add_pkg_dep_string (which fills self._dep_stack).
# NOTE(review): excerpt is missing the docstring quote lines and the
# return statements after the failure check (presumably `return 0` /
# `return 1` or False/True). Verify against the full file.
2602 def _pop_disjunction(self, allow_unsatisfied):
2604 Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
2605 populate self._dep_stack.
2607 pkg, dep_root, dep_priority, dep_struct = \
2608 self._dep_disjunctive_stack.pop()
# Re-serialize the nested-list structure back into dep-string form.
2609 dep_string = portage.dep.paren_enclose(dep_struct)
2610 if not self._add_pkg_dep_string(
2611 pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
2615 def _priority(self, **kwargs):
2616 if "remove" in self.myparams:
2617 priority_constructor = UnmergeDepPriority
2619 priority_constructor = DepPriority
2620 return priority_constructor(**kwargs)
# Expand a category-less atom (e.g. "foo") into a list of fully
# qualified atoms ("cat/foo") for every category in which the package
# name exists in any of root_config's filtered package databases.
# NOTE(review): excerpt is missing lines -- the docstring quotes, the
# `categories = set()` initialization and `categories.add(cat)`
# accumulation inside the loop, the `deps = []` init, and the final
# `return deps`. As shown, `categories` and `deps` are undefined when
# used. Verify against the full file.
2622 def _dep_expand(self, root_config, atom_without_category):
2624 @param root_config: a root config instance
2625 @type root_config: RootConfig
2626 @param atom_without_category: an atom without a category component
2627 @type atom_without_category: String
2629 @returns: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category so the atom parses, then split the
# package-name part back out.
2631 null_cp = portage.dep_getkey(insert_category_into_atom(
2632 atom_without_category, "null"))
2633 cat, atom_pn = portage.catsplit(null_cp)
2635 dbs = self._filtered_trees[root_config.root]["dbs"]
2637 for db, pkg_type, built, installed, db_keys in dbs:
2638 for cat in db.categories:
2639 if db.cp_list("%s/%s" % (cat, atom_pn)):
2643 for cat in categories:
2644 deps.append(insert_category_into_atom(
2645 atom_without_category, cat))
2648 def _have_new_virt(self, root, atom_cp):
2650 for db, pkg_type, built, installed, db_keys in \
2651 self._filtered_trees[root]["dbs"]:
2652 if db.cp_list(atom_cp):
# Generator: yield the (parent_arg, atom) pairs from the command-line
# argument set that match pkg, skipping atoms that should instead be
# satisfied by a new-style virtual and atoms for which a higher
# version exists in a different slot (unless the arg names the exact
# package).
# NOTE(review): excerpt is line-sampled -- the early `return` for a
# foreign root, the `continue` statements paired with the visible
# guards, `higher_slot = None` initialization, the `continue` after
# the higher-slot check, and the terminal `yield parent_atom` lines
# are not visible. Verify against the full file.
2657 def _iter_atoms_for_pkg(self, pkg):
2658 # TODO: add multiple $ROOT support
2659 if pkg.root != self.target_root:
2661 atom_arg_map = self._atom_arg_map
2662 root_config = self.roots[pkg.root]
2663 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2664 atom_cp = portage.dep_getkey(atom)
# If the cp differs, the atom matched via PROVIDE; skip it when a
# new-style virtual should take over (see _have_new_virt).
2665 if atom_cp != pkg.cp and \
2666 self._have_new_virt(pkg.root, atom_cp):
2668 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2669 visible_pkgs.reverse() # descending order
2671 for visible_pkg in visible_pkgs:
2672 if visible_pkg.cp != atom_cp:
2674 if pkg >= visible_pkg:
2675 # This is descending order, and we're not
2676 # interested in any versions <= pkg given.
2678 if pkg.slot_atom != visible_pkg.slot_atom:
2679 higher_slot = visible_pkg
2681 if higher_slot is not None:
# PackageArg pins the exact package, overriding the
# higher-slot preference (condition continues past view).
2683 for arg in atom_arg_map[(atom, pkg.root)]:
2684 if isinstance(arg, PackageArg) and \
# Entry point for turning the user's command-line arguments (.tbz2
# files, .ebuild paths, filesystem paths, set names, and atoms) into
# argument objects, seeding the dependency graph with them, and
# returning (success, favorites).
# NOTE(review): excerpt is heavily line-sampled throughout -- loop
# headers, `try:` lines, `return`/`continue` statements and variable
# initializations (args, myfavorites, lookup_owners, owners, ...) are
# missing in many places. The section comments below describe intent;
# verify details against the full file.
2689 def select_files(self, myfiles):
2690 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
2691 appropriate depgraph and return a favorite list."""
2692 debug = "--debug" in self.myopts
2693 root_config = self.roots[self.target_root]
2694 sets = root_config.sets
2695 getSetAtoms = root_config.setconfig.getSetAtoms
2697 myroot = self.target_root
2698 dbs = self._filtered_trees[myroot]["dbs"]
2699 vardb = self.trees[myroot]["vartree"].dbapi
2700 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
2701 portdb = self.trees[myroot]["porttree"].dbapi
2702 bindb = self.trees[myroot]["bintree"].dbapi
2703 pkgsettings = self.pkgsettings[myroot]
2705 onlydeps = "--onlydeps" in self.myopts
# --- Per-argument classification (presumably inside `for x in
# myfiles:` -- not visible in this excerpt). ---
2708 ext = os.path.splitext(x)[1]
# Case 1: a binary package file (.tbz2), possibly given by bare
# name and located under $PKGDIR or $PKGDIR/All.
2710 if not os.path.exists(x):
2712 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2713 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2714 elif os.path.exists(
2715 os.path.join(pkgsettings["PKGDIR"], x)):
2716 x = os.path.join(pkgsettings["PKGDIR"], x)
2718 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2719 print "!!! Please ensure the tbz2 exists as specified.\n"
2720 return 0, myfavorites
# Derive the cpv key from the tbz2's embedded CATEGORY metadata.
2721 mytbz2=portage.xpak.tbz2(x)
2722 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
2723 if os.path.realpath(x) != \
2724 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2725 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2726 return 0, myfavorites
2727 db_keys = list(bindb._aux_cache_keys)
2728 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
2729 pkg = Package(type_name="binary", root_config=root_config,
2730 cpv=mykey, built=True, metadata=metadata,
2732 self._pkg_cache[pkg] = pkg
2733 args.append(PackageArg(arg=x, package=pkg,
2734 root_config=root_config))
# Case 2: a raw .ebuild path inside a valid tree layout.
2735 elif ext==".ebuild":
2736 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2737 pkgdir = os.path.dirname(ebuild_path)
2738 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2739 cp = pkgdir[len(tree_root)+1:]
2740 e = portage.exception.PackageNotFound(
2741 ("%s is not in a valid portage tree " + \
2742 "hierarchy or does not exist") % x)
2743 if not portage.isvalidatom(cp):
2745 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to build the cpv key.
2746 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2747 if not portage.isvalidatom("="+mykey):
2749 ebuild_path = portdb.findname(mykey)
2751 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2752 cp, os.path.basename(ebuild_path)):
2753 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2754 return 0, myfavorites
2755 if mykey not in portdb.xmatch(
2756 "match-visible", portage.dep_getkey(mykey)):
2757 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2758 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2759 print colorize("BAD", "*** page for details.")
2760 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2763 raise portage.exception.PackageNotFound(
2764 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2765 db_keys = list(portdb._aux_cache_keys)
2766 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
2767 pkg = Package(type_name="ebuild", root_config=root_config,
2768 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2769 pkgsettings.setcpv(pkg)
2770 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
2771 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
2772 self._pkg_cache[pkg] = pkg
2773 args.append(PackageArg(arg=x, package=pkg,
2774 root_config=root_config))
# Case 3: an absolute filesystem path -- resolve to the owning
# installed package via vardb owner lookup (batched below).
2775 elif x.startswith(os.path.sep):
2776 if not x.startswith(myroot):
2777 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2778 " $ROOT.\n") % x, noiselevel=-1)
2780 # Queue these up since it's most efficient to handle
2781 # multiple files in a single iter_owners() call.
2782 lookup_owners.append(x)
# Case 4: a package set ("system"/"world" or @set syntax).
2784 if x in ("system", "world"):
2786 if x.startswith(SETPREFIX):
2787 s = x[len(SETPREFIX):]
2789 raise portage.exception.PackageSetNotFound(s)
2792 # Recursively expand sets so that containment tests in
2793 # self._get_parent_sets() properly match atoms in nested
2794 # sets (like if world contains system).
2795 expanded_set = InternalPackageSet(
2796 initial_atoms=getSetAtoms(s))
2797 self._sets[s] = expanded_set
2798 args.append(SetArg(arg=x, set=expanded_set,
2799 root_config=root_config))
# Case 5: a plain package atom.
2801 if not is_valid_package_atom(x):
2802 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2804 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2805 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2807 # Don't expand categories or old-style virtuals here unless
2808 # necessary. Expansion of old-style virtuals here causes at
2809 # least the following problems:
2810 # 1) It's more difficult to determine which set(s) an atom
2811 # came from, if any.
2812 # 2) It takes away freedom from the resolver to choose other
2813 # possible expansions when necessary.
2815 args.append(AtomArg(arg=x, atom=x,
2816 root_config=root_config))
# Category-less atom: expand and disambiguate, preferring the
# category of an installed (non-virtual) match.
2818 expanded_atoms = self._dep_expand(root_config, x)
2819 installed_cp_set = set()
2820 for atom in expanded_atoms:
2821 atom_cp = portage.dep_getkey(atom)
2822 if vardb.cp_list(atom_cp):
2823 installed_cp_set.add(atom_cp)
2825 if len(installed_cp_set) > 1:
2826 non_virtual_cps = set()
2827 for atom_cp in installed_cp_set:
2828 if not atom_cp.startswith("virtual/"):
2829 non_virtual_cps.add(atom_cp)
2830 if len(non_virtual_cps) == 1:
2831 installed_cp_set = non_virtual_cps
2833 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2834 installed_cp = iter(installed_cp_set).next()
2835 expanded_atoms = [atom for atom in expanded_atoms \
2836 if portage.dep_getkey(atom) == installed_cp]
2838 if len(expanded_atoms) > 1:
2841 ambiguous_package_name(x, expanded_atoms, root_config,
2842 self.spinner, self.myopts)
2843 return False, myfavorites
2845 atom = expanded_atoms[0]
2847 null_atom = insert_category_into_atom(x, "null")
2848 null_cp = portage.dep_getkey(null_atom)
2849 cat, atom_pn = portage.catsplit(null_cp)
2850 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2852 # Allow the depgraph to choose which virtual.
2853 atom = insert_category_into_atom(x, "virtual")
2855 atom = insert_category_into_atom(x, "null")
2857 args.append(AtomArg(arg=x, atom=atom,
2858 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages. ---
2862 search_for_multiple = False
2863 if len(lookup_owners) > 1:
2864 search_for_multiple = True
2866 for x in lookup_owners:
2867 if not search_for_multiple and os.path.isdir(x):
2868 search_for_multiple = True
2869 relative_paths.append(x[len(myroot):])
2872 for pkg, relative_path in \
2873 real_vardb._owners.iter_owners(relative_paths):
2874 owners.add(pkg.mycpv)
2875 if not search_for_multiple:
2879 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2880 "by any package.\n") % lookup_owners[0], noiselevel=-1)
# Turn each owner cpv into a slot atom (cp alone when SLOT is
# missing from an old install).
2884 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2886 # portage now masks packages with missing slot, but it's
2887 # possible that one was installed by an older version
2888 atom = portage.cpv_getkey(cpv)
2890 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
2891 args.append(AtomArg(arg=atom, atom=atom,
2892 root_config=root_config))
2894 if "--update" in self.myopts:
2895 # In some cases, the greedy slots behavior can pull in a slot that
2896 # the user would want to uninstall due to it being blocked by a
2897 # newer version in a different slot. Therefore, it's necessary to
2898 # detect and discard any that should be uninstalled. Each time
2899 # that arguments are updated, package selections are repeated in
2900 # order to ensure consistency with the current arguments:
2902 # 1) Initialize args
2903 # 2) Select packages and generate initial greedy atoms
2904 # 3) Update args with greedy atoms
2905 # 4) Select packages and generate greedy atoms again, while
2906 # accounting for any blockers between selected packages
2907 # 5) Update args with revised greedy atoms
2909 self._set_args(args)
2912 greedy_args.append(arg)
2913 if not isinstance(arg, AtomArg):
2915 for atom in self._greedy_slots(arg.root_config, arg.atom):
2917 AtomArg(arg=arg.arg, atom=atom,
2918 root_config=arg.root_config))
2920 self._set_args(greedy_args)
2923 # Revise greedy atoms, accounting for any blockers
2924 # between selected packages.
2925 revised_greedy_args = []
2927 revised_greedy_args.append(arg)
2928 if not isinstance(arg, AtomArg):
2930 for atom in self._greedy_slots(arg.root_config, arg.atom,
2931 blocker_lookahead=True):
2932 revised_greedy_args.append(
2933 AtomArg(arg=arg.arg, atom=atom,
2934 root_config=arg.root_config))
2935 args = revised_greedy_args
2936 del revised_greedy_args
2938 self._set_args(args)
# --- Collect favorites (atoms for AtomArg/PackageArg, set names
# for SetArg), deduplicated via a set. ---
2940 myfavorites = set(myfavorites)
2942 if isinstance(arg, (AtomArg, PackageArg)):
2943 myfavorites.add(arg.atom)
2944 elif isinstance(arg, SetArg):
2945 myfavorites.add(arg.arg)
2946 myfavorites = list(myfavorites)
2948 pprovideddict = pkgsettings.pprovideddict
2950 portage.writemsg("\n", noiselevel=-1)
2951 # Order needs to be preserved since a feature of --nodeps
2952 # is to allow the user to force a specific merge order.
# --- Seed the graph: select and add a package for every atom of
# every argument (presumably inside `for arg in args:`). ---
2956 for atom in arg.set:
2957 self.spinner.update()
2958 dep = Dependency(atom=atom, onlydeps=onlydeps,
2959 root=myroot, parent=arg)
2960 atom_cp = portage.dep_getkey(atom)
2962 pprovided = pprovideddict.get(portage.dep_getkey(atom))
2963 if pprovided and portage.match_from_list(atom, pprovided):
2964 # A provided package has been specified on the command line.
2965 self._pprovided_args.append((arg, atom))
2967 if isinstance(arg, PackageArg):
2968 if not self._add_pkg(arg.package, dep) or \
2969 not self._create_graph():
2970 sys.stderr.write(("\n\n!!! Problem resolving " + \
2971 "dependencies for %s\n") % arg.arg)
2972 return 0, myfavorites
2975 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
2976 (arg, atom), noiselevel=-1)
2977 pkg, existing_node = self._select_package(
2978 myroot, atom, onlydeps=onlydeps)
# No selectable package: fatal unless the atom came from the
# system or world set.
2980 if not (isinstance(arg, SetArg) and \
2981 arg.name in ("system", "world")):
2982 self._unsatisfied_deps_for_display.append(
2983 ((myroot, atom), {}))
2984 return 0, myfavorites
2985 self._missing_args.append((arg, atom))
2987 if atom_cp != pkg.cp:
2988 # For old-style virtuals, we need to repeat the
2989 # package.provided check against the selected package.
2990 expanded_atom = atom.replace(atom_cp, pkg.cp)
2991 pprovided = pprovideddict.get(pkg.cp)
2993 portage.match_from_list(expanded_atom, pprovided):
2994 # A provided package has been
2995 # specified on the command line.
2996 self._pprovided_args.append((arg, atom))
2998 if pkg.installed and "selective" not in self.myparams:
2999 self._unsatisfied_deps_for_display.append(
3000 ((myroot, atom), {}))
3001 # Previous behavior was to bail out in this case, but
3002 # since the dep is satisfied by the installed package,
3003 # it's more friendly to continue building the graph
3004 # and just show a warning message. Therefore, only bail
3005 # out here if the atom is not from either the system or
3007 if not (isinstance(arg, SetArg) and \
3008 arg.name in ("system", "world")):
3009 return 0, myfavorites
3011 # Add the selected package to the graph as soon as possible
3012 # so that later dep_check() calls can use it as feedback
3013 # for making more consistent atom selections.
3014 if not self._add_pkg(pkg, dep):
3015 if isinstance(arg, SetArg):
3016 sys.stderr.write(("\n\n!!! Problem resolving " + \
3017 "dependencies for %s from %s\n") % \
3020 sys.stderr.write(("\n\n!!! Problem resolving " + \
3021 "dependencies for %s\n") % atom)
3022 return 0, myfavorites
3024 except portage.exception.MissingSignature, e:
3025 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
3026 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3027 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3028 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3029 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3030 return 0, myfavorites
3031 except portage.exception.InvalidSignature, e:
3032 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
3033 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3034 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3035 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3036 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3037 return 0, myfavorites
3038 except SystemExit, e:
3039 raise # Needed else can't exit
3040 except Exception, e:
3041 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
3042 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
3045 # Now that the root packages have been added to the graph,
3046 # process the dependencies.
3047 if not self._create_graph():
3048 return 0, myfavorites
# --- With --usepkgonly, verify every merge node is a binary. ---
3051 if "--usepkgonly" in self.myopts:
3052 for xs in self.digraph.all_nodes():
3053 if not isinstance(xs, Package):
3055 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
3059 print "Missing binary for:",xs[2]
3063 except self._unknown_internal_error:
3064 return False, myfavorites
3066 # We're true here unless we are missing binaries.
3067 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->argument map from the
# current argument list, then invalidate the package selection caches
# (arguments influence package selections).
# NOTE(review): excerpt is line-sampled -- the docstring quotes, the
# `args_set.clear()` / `for arg in args:` loop headers, the guard
# `continue`s, the `args_set.add(...)` calls, and the `refs = []`
# initialization before 3096 are not visible. Verify against the full
# file.
3069 def _set_args(self, args):
3071 Create the "args" package set from atoms and packages given as
3072 arguments. This method can be called multiple times if necessary.
3073 The package selection cache is automatically invalidated, since
3074 arguments influence package selections.
3076 args_set = self._sets["args"]
3079 if not isinstance(arg, (AtomArg, PackageArg)):
3082 if atom in args_set:
# Refresh the flattened union of all set atoms.
3086 self._set_atoms.clear()
3087 self._set_atoms.update(chain(*self._sets.itervalues()))
3088 atom_arg_map = self._atom_arg_map
3089 atom_arg_map.clear()
3091 for atom in arg.set:
3092 atom_key = (atom, arg.root_config.root)
3093 refs = atom_arg_map.get(atom_key)
3096 atom_arg_map[atom_key] = refs
3100 # Invalidate the package selection cache, since
3101 # arguments influence package selections.
3102 self._highest_pkg_cache.clear()
3103 for trees in self._filtered_trees.itervalues():
3104 trees["porttree"].dbapi._clear_cache()
# Compute additional slot atoms for installed slots of the same
# package that differ from the slot of the highest visible match, so
# --update can pull all installed slots forward. With
# blocker_lookahead, candidate slots whose deps would block (or be
# blocked by) each other or the highest match are discarded.
# NOTE(review): excerpt is line-sampled -- the docstring quotes, early
# returns for the no-match / single-slot cases, `slots = set()` and
# `greedy_pkgs = []` / `blockers = {}` initializations, and several
# `continue`/`break` statements are not visible. Verify against the
# full file.
3106 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
3108 Return a list of slot atoms corresponding to installed slots that
3109 differ from the slot of the highest visible match. When
3110 blocker_lookahead is True, slot atoms that would trigger a blocker
3111 conflict are automatically discarded, potentially allowing automatic
3112 uninstallation of older slots when appropriate.
3114 highest_pkg, in_graph = self._select_package(root_config.root, atom)
3115 if highest_pkg is None:
3117 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of installed versions sharing highest_pkg's cp.
3119 for cpv in vardb.match(atom):
3120 # don't mix new virtuals with old virtuals
3121 if portage.cpv_getkey(cpv) == highest_pkg.cp:
3122 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
3124 slots.add(highest_pkg.metadata["SLOT"])
3128 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select a package; keep only downgrades
# of the same cp (pkg < highest_pkg) as greedy candidates.
3131 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
3132 pkg, in_graph = self._select_package(root_config.root, slot_atom)
3133 if pkg is not None and \
3134 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
3135 greedy_pkgs.append(pkg)
3138 if not blocker_lookahead:
3139 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: parse each candidate's dep strings and build a
# per-package set of blocker atoms.
3142 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
3143 for pkg in greedy_pkgs + [highest_pkg]:
3144 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
3146 atoms = self._select_atoms(
3147 pkg.root, dep_str, pkg.use.enabled,
3148 parent=pkg, strict=True)
3149 except portage.exception.InvalidDependString:
3151 blocker_atoms = (x for x in atoms if x.blocker)
3152 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
3154 if highest_pkg not in blockers:
3157 # filter packages with invalid deps
3158 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
3160 # filter packages that conflict with highest_pkg
3161 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
3162 (blockers[highest_pkg].findAtomForPackage(pkg) or \
3163 blockers[pkg].findAtomForPackage(highest_pkg))]
3168 # If two packages conflict, discard the lower version.
3169 discard_pkgs = set()
3170 greedy_pkgs.sort(reverse=True)
3171 for i in xrange(len(greedy_pkgs) - 1):
3172 pkg1 = greedy_pkgs[i]
3173 if pkg1 in discard_pkgs:
3175 for j in xrange(i + 1, len(greedy_pkgs)):
3176 pkg2 = greedy_pkgs[j]
3177 if pkg2 in discard_pkgs:
3179 if blockers[pkg1].findAtomForPackage(pkg2) or \
3180 blockers[pkg2].findAtomForPackage(pkg1):
# pkg2 is the lower version (list is sorted descending).
3182 discard_pkgs.add(pkg2)
3184 return [pkg.slot_atom for pkg in greedy_pkgs \
3185 if pkg not in discard_pkgs]
3187 def _select_atoms_from_graph(self, *pargs, **kwargs):
3189 Prefer atoms matching packages that have already been
3190 added to the graph or those that are installed and have
3191 not been scheduled for replacement.
3193 kwargs["trees"] = self._graph_trees
3194 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage's dep_check over a dep string and return the atoms it
# selects, temporarily disabling strict dep checking and (for
# buildtime deps only) exposing the parent package to the circular
# dependency avoidance code via trees[root]["parent"].
# NOTE(review): excerpt is line-sampled -- the `if trees is None:`
# guard before 3202, the `parent = None` reset around 3210-3212, the
# `try:`/`finally:` lines pairing with the cleanup at 3221-3223, and
# the `if not mycheck[0]:` test before the raise at 3225 are not
# visible. Verify against the full file.
3196 def _select_atoms_highest_available(self, root, depstring,
3197 myuse=None, parent=None, strict=True, trees=None, priority=None):
3198 """This will raise InvalidDependString if necessary. If trees is
3199 None then self._filtered_trees is used."""
3200 pkgsettings = self.pkgsettings[root]
3202 trees = self._filtered_trees
3203 if not getattr(priority, "buildtime", False):
3204 # The parent should only be passed to dep_check() for buildtime
3205 # dependencies since that's the only case when it's appropriate
3206 # to trigger the circular dependency avoidance code which uses it.
3207 # It's important not to trigger the same circular dependency
3208 # avoidance code for runtime dependencies since it's not needed
3209 # and it can promote an incorrect package choice.
3213 if parent is not None:
# dep_check() discovers the parent via this side-channel entry.
3214 trees[root]["parent"] = parent
3216 portage.dep._dep_check_strict = False
3217 mycheck = portage.dep_check(depstring, None,
3218 pkgsettings, myuse=myuse,
3219 myroot=root, trees=trees)
# Cleanup (presumably in a finally: block -- TODO confirm): remove
# the parent side-channel and restore strict checking.
3221 if parent is not None:
3222 trees[root].pop("parent")
3223 portage.dep._dep_check_strict = True
# mycheck is (success, atoms-or-message); on failure the second
# element is the error text.
3225 raise portage.exception.InvalidDependString(mycheck[1])
3226 selected_atoms = mycheck[1]
3227 return selected_atoms
# Print a human-readable explanation of why no package satisfies `atom` for
# the given root: masked candidates, USE-flag changes that would make a
# package match, or missing IUSE — then walk the digraph upward from
# `myparent` to show which packages/arguments pulled the dependency in.
# NOTE(review): this excerpt is flattened and several interior lines are
# elided (e.g. initializations of missing_use/missing_iuse/changes, some
# conditionals), so comments below are hedged accordingly.
3229 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
3230 atom = portage.dep.Atom(atom)
3231 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a variant of the atom stripped of USE deps (slot preserved) so the
# db match below can find candidates that differ only in USE.
3232 atom_without_use = atom
3234 atom_without_use = portage.dep.remove_slot(atom)
3236 atom_without_use += ":" + atom.slot
3237 atom_without_use = portage.dep.Atom(atom_without_use)
3238 xinfo = '"%s"' % atom
3241 # Discard null/ from failed cpv_expand category expansion.
3242 xinfo = xinfo.replace("null/", "")
3243 masked_packages = []
3245 masked_pkg_instances = set()
3246 missing_licenses = []
3247 have_eapi_mask = False
3248 pkgsettings = self.pkgsettings[root]
3249 implicit_iuse = pkgsettings._get_implicit_iuse()
3250 root_config = self.roots[root]
3251 portdb = self.roots[root].trees["porttree"].dbapi
# Scan every configured db (ebuild/binary/installed) for candidates and
# collect the reason each one was rejected.
3252 dbs = self._filtered_trees[root]["dbs"]
3253 for db, pkg_type, built, installed, db_keys in dbs:
3257 if hasattr(db, "xmatch"):
3258 cpv_list = db.xmatch("match-all", atom_without_use)
3260 cpv_list = db.match(atom_without_use)
3263 for cpv in cpv_list:
3264 metadata, mreasons = get_mask_info(root_config, cpv,
3265 pkgsettings, db, pkg_type, built, installed, db_keys)
3266 if metadata is not None:
3267 pkg = Package(built=built, cpv=cpv,
3268 installed=installed, metadata=metadata,
3269 root_config=root_config)
3270 if pkg.cp != atom.cp:
3271 # A cpv can be returned from dbapi.match() as an
3272 # old-style virtual match even in cases when the
3273 # package does not actually PROVIDE the virtual.
3274 # Filter out any such false matches here.
3275 if not atom_set.findAtomForPackage(pkg):
3278 masked_pkg_instances.add(pkg)
3280 missing_use.append(pkg)
3283 masked_packages.append(
3284 (root_config, pkgsettings, cpv, metadata, mreasons))
# For packages rejected only by USE deps, work out whether the atom's
# required flags are absent from IUSE or merely set the wrong way.
3286 missing_use_reasons = []
3287 missing_iuse_reasons = []
3288 for pkg in missing_use:
3289 use = pkg.use.enabled
3290 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
3291 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
3293 for x in atom.use.required:
3294 if iuse_re.match(x) is None:
3295 missing_iuse.append(x)
3298 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3299 missing_iuse_reasons.append((pkg, mreasons))
3301 need_enable = sorted(atom.use.enabled.difference(use))
3302 need_disable = sorted(atom.use.disabled.intersection(use))
3303 if need_enable or need_disable:
3305 changes.extend(colorize("red", "+" + x) \
3306 for x in need_enable)
3307 changes.extend(colorize("blue", "-" + x) \
3308 for x in need_disable)
3309 mreasons.append("Change USE: %s" % " ".join(changes))
3310 missing_use_reasons.append((pkg, mreasons))
# Only unmasked packages are worth suggesting USE changes for; masked ones
# fall through to the normal masking message.
3312 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3313 in missing_use_reasons if pkg not in masked_pkg_instances]
3315 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3316 in missing_iuse_reasons if pkg not in masked_pkg_instances]
3318 show_missing_use = False
3319 if unmasked_use_reasons:
3320 # Only show the latest version.
3321 show_missing_use = unmasked_use_reasons[:1]
3322 elif unmasked_iuse_reasons:
3323 if missing_use_reasons:
3324 # All packages with required IUSE are masked,
3325 # so display a normal masking message.
3328 show_missing_use = unmasked_iuse_reasons
# Emit whichever diagnostic applies: USE-change suggestion, masking
# report, or a plain "no ebuilds" message.
3330 if show_missing_use:
3331 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
3332 print "!!! One of the following packages is required to complete your request:"
3333 for pkg, mreasons in show_missing_use:
3334 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
3336 elif masked_packages:
3338 colorize("BAD", "All ebuilds that could satisfy ") + \
3339 colorize("INFORM", xinfo) + \
3340 colorize("BAD", " have been masked.")
3341 print "!!! One of the following masked packages is required to complete your request:"
3342 have_eapi_mask = show_masked_packages(masked_packages)
3345 msg = ("The current version of portage supports " + \
3346 "EAPI '%s'. You must upgrade to a newer version" + \
3347 " of portage before EAPI masked packages can" + \
3348 " be installed.") % portage.const.EAPI
3349 from textwrap import wrap
3350 for line in wrap(msg, 75):
3355 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
3357 # Show parent nodes and the argument that pulled them in.
3358 traversed_nodes = set()
3361 while node is not None:
3362 traversed_nodes.add(node)
3363 msg.append('(dependency required by "%s" [%s])' % \
3364 (colorize('INFORM', str(node.cpv)), node.type_name))
3365 # When traversing to parents, prefer arguments over packages
3366 # since arguments are root nodes. Never traverse the same
3367 # package twice, in order to prevent an infinite loop.
3368 selected_parent = None
3369 for parent in self.digraph.parent_nodes(node):
3370 if isinstance(parent, DependencyArg):
3371 msg.append('(dependency required by "%s" [argument])' % \
3372 (colorize('INFORM', str(parent))))
# A DependencyArg parent terminates the walk (arguments are roots).
3373 selected_parent = None
3375 if parent not in traversed_nodes:
3376 selected_parent = parent
3377 node = selected_parent
# Memoizing wrapper around _select_pkg_highest_available_imp().
# Results are cached by (root, atom, onlydeps); a cached result whose
# package has since been added to the graph is refreshed so `existing`
# reflects the graph node. NOTE(review): interior lines (cache-hit branch
# structure, unpacking of `ret` into pkg/existing) are elided from this
# excerpt.
3383 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3384 cache_key = (root, atom, onlydeps)
3385 ret = self._highest_pkg_cache.get(cache_key)
3388 if pkg and not existing:
3389 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
3390 if existing and existing == pkg:
3391 # Update the cache to reflect that the
3392 # package has been added to the graph.
3394 self._highest_pkg_cache[cache_key] = ret
# Cache miss: do the real work and remember it.
3396 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3397 self._highest_pkg_cache[cache_key] = ret
# Record the selected package as visible (unless it is an installed
# package masked by missing KEYWORDS) so later visibility queries are
# cheap.
3400 settings = pkg.root_config.settings
3401 if visible(settings, pkg) and not (pkg.installed and \
3402 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
3403 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan all configured dbs (ebuild, binary,
# installed) for packages matching `atom`, apply visibility / KEYWORDS /
# USE-dep / --newuse / --noreplace filtering, and return a
# (package, existing_graph_node) pair. NOTE(review): this excerpt is
# flattened and many interior lines (continue/break statements, several
# conditionals and assignments) are elided; comments are hedged where the
# visible code does not establish behavior.
3406 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3407 root_config = self.roots[root]
3408 pkgsettings = self.pkgsettings[root]
3409 dbs = self._filtered_trees[root]["dbs"]
3410 vardb = self.roots[root].trees["vartree"].dbapi
3411 portdb = self.roots[root].trees["porttree"].dbapi
3412 # List of acceptable packages, ordered by type preference.
3413 matched_packages = []
3414 highest_version = None
3415 if not isinstance(atom, portage.dep.Atom):
3416 atom = portage.dep.Atom(atom)
3418 atom_set = InternalPackageSet(initial_atoms=(atom,))
3419 existing_node = None
3421 usepkgonly = "--usepkgonly" in self.myopts
3422 empty = "empty" in self.myparams
3423 selective = "selective" in self.myparams
3425 noreplace = "--noreplace" in self.myopts
3426 # Behavior of the "selective" parameter depends on
3427 # whether or not a package matches an argument atom.
3428 # If an installed package provides an old-style
3429 # virtual that is no longer provided by an available
3430 # package, the installed package may match an argument
3431 # atom even though none of the available packages do.
3432 # Therefore, "selective" logic does not consider
3433 # whether or not an installed package matches an
3434 # argument atom. It only considers whether or not
3435 # available packages match argument atoms, which is
3436 # represented by the found_available_arg flag.
3437 found_available_arg = False
# Two passes: first look for an existing graph node, then select fresh.
3438 for find_existing_node in True, False:
3441 for db, pkg_type, built, installed, db_keys in dbs:
3444 if installed and not find_existing_node:
3445 want_reinstall = reinstall or empty or \
3446 (found_available_arg and not selective)
3447 if want_reinstall and matched_packages:
3449 if hasattr(db, "xmatch"):
3450 cpv_list = db.xmatch("match-all", atom)
3452 cpv_list = db.match(atom)
3454 # USE=multislot can make an installed package appear as if
3455 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3456 # won't do any good as long as USE=multislot is enabled since
3457 # the newly built package still won't have the expected slot.
3458 # Therefore, assume that such SLOT dependencies are already
3459 # satisfied rather than forcing a rebuild.
3460 if installed and not cpv_list and atom.slot:
3461 for cpv in db.match(atom.cp):
3462 slot_available = False
3463 for other_db, other_type, other_built, \
3464 other_installed, other_keys in dbs:
3467 other_db.aux_get(cpv, ["SLOT"])[0]:
3468 slot_available = True
3472 if not slot_available:
3474 inst_pkg = self._pkg(cpv, "installed",
3475 root_config, installed=installed)
3476 # Remove the slot from the atom and verify that
3477 # the package matches the resulting atom.
3478 atom_without_slot = portage.dep.remove_slot(atom)
3480 atom_without_slot += str(atom.use)
3481 atom_without_slot = portage.dep.Atom(atom_without_slot)
3482 if portage.match_from_list(
3483 atom_without_slot, [inst_pkg]):
3484 cpv_list = [inst_pkg.cpv]
3489 pkg_status = "merge"
3490 if installed or onlydeps:
3491 pkg_status = "nomerge"
3494 for cpv in cpv_list:
3495 # Make --noreplace take precedence over --newuse.
3496 if not installed and noreplace and \
3497 cpv in vardb.match(atom):
3498 # If the installed version is masked, it may
3499 # be necessary to look at lower versions,
3500 # in case there is a visible downgrade.
3502 reinstall_for_flags = None
3503 cache_key = (pkg_type, root, cpv, pkg_status)
3504 calculated_use = True
3505 pkg = self._pkg_cache.get(cache_key)
3507 calculated_use = False
3509 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3512 pkg = Package(built=built, cpv=cpv,
3513 installed=installed, metadata=metadata,
3514 onlydeps=onlydeps, root_config=root_config,
3516 metadata = pkg.metadata
3518 metadata['CHOST'] = pkgsettings.get('CHOST', '')
3519 if not built and ("?" in metadata["LICENSE"] or \
3520 "?" in metadata["PROVIDE"]):
3521 # This is avoided whenever possible because
3522 # it's expensive. It only needs to be done here
3523 # if it has an effect on visibility.
3524 pkgsettings.setcpv(pkg)
3525 metadata["USE"] = pkgsettings["PORTAGE_USE"]
3526 calculated_use = True
3527 self._pkg_cache[pkg] = pkg
3529 if not installed or (built and matched_packages):
3530 # Only enforce visibility on installed packages
3531 # if there is at least one other visible package
3532 # available. By filtering installed masked packages
3533 # here, packages that have been masked since they
3534 # were installed can be automatically downgraded
3535 # to an unmasked version.
3537 if not visible(pkgsettings, pkg):
3539 except portage.exception.InvalidDependString:
3543 # Enable upgrade or downgrade to a version
3544 # with visible KEYWORDS when the installed
3545 # version is masked by KEYWORDS, but never
3546 # reinstall the same exact version only due
3547 # to a KEYWORDS mask.
3548 if built and matched_packages:
3550 different_version = None
3551 for avail_pkg in matched_packages:
3552 if not portage.dep.cpvequal(
3553 pkg.cpv, avail_pkg.cpv):
3554 different_version = avail_pkg
3556 if different_version is not None:
3559 pkgsettings._getMissingKeywords(
3560 pkg.cpv, pkg.metadata):
3563 # If the ebuild no longer exists or it's
3564 # keywords have been dropped, reject built
3565 # instances (installed or binary).
3566 # If --usepkgonly is enabled, assume that
3567 # the ebuild status should be ignored.
3571 pkg.cpv, "ebuild", root_config)
3572 except portage.exception.PackageNotFound:
3575 if not visible(pkgsettings, pkg_eb):
# Lazily compute ebuild USE only when it was not needed above.
3578 if not pkg.built and not calculated_use:
3579 # This is avoided whenever possible because
3581 pkgsettings.setcpv(pkg)
3582 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
3584 if pkg.cp != atom.cp:
3585 # A cpv can be returned from dbapi.match() as an
3586 # old-style virtual match even in cases when the
3587 # package does not actually PROVIDE the virtual.
3588 # Filter out any such false matches here.
3589 if not atom_set.findAtomForPackage(pkg):
3593 if root == self.target_root:
3595 # Ebuild USE must have been calculated prior
3596 # to this point, in case atoms have USE deps.
3597 myarg = self._iter_atoms_for_pkg(pkg).next()
3598 except StopIteration:
3600 except portage.exception.InvalidDependString:
3602 # masked by corruption
3604 if not installed and myarg:
3605 found_available_arg = True
# Reject candidates whose USE flags cannot satisfy the atom's USE deps.
3607 if atom.use and not pkg.built:
3608 use = pkg.use.enabled
3609 if atom.use.enabled.difference(use):
3611 if atom.use.disabled.intersection(use):
3613 if pkg.cp == atom_cp:
3614 if highest_version is None:
3615 highest_version = pkg
3616 elif pkg > highest_version:
3617 highest_version = pkg
3618 # At this point, we've found the highest visible
3619 # match from the current repo. Any lower versions
3620 # from this repo are ignored, so this so the loop
3621 # will always end with a break statement below
3623 if find_existing_node:
3624 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3627 if portage.dep.match_from_list(atom, [e_pkg]):
3628 if highest_version and \
3629 e_pkg.cp == atom_cp and \
3630 e_pkg < highest_version and \
3631 e_pkg.slot_atom != highest_version.slot_atom:
3632 # There is a higher version available in a
3633 # different slot, so this existing node is
3637 matched_packages.append(e_pkg)
3638 existing_node = e_pkg
3640 # Compare built package to current config and
3641 # reject the built package if necessary.
3642 if built and not installed and \
3643 ("--newuse" in self.myopts or \
3644 "--reinstall" in self.myopts):
3645 iuses = pkg.iuse.all
3646 old_use = pkg.use.enabled
3648 pkgsettings.setcpv(myeb)
3650 pkgsettings.setcpv(pkg)
3651 now_use = pkgsettings["PORTAGE_USE"].split()
3652 forced_flags = set()
3653 forced_flags.update(pkgsettings.useforce)
3654 forced_flags.update(pkgsettings.usemask)
3656 if myeb and not usepkgonly:
3657 cur_iuse = myeb.iuse.all
3658 if self._reinstall_for_flags(forced_flags,
3662 # Compare current config to installed package
3663 # and do not reinstall if possible.
3664 if not installed and \
3665 ("--newuse" in self.myopts or \
3666 "--reinstall" in self.myopts) and \
3667 cpv in vardb.match(atom):
3668 pkgsettings.setcpv(pkg)
3669 forced_flags = set()
3670 forced_flags.update(pkgsettings.useforce)
3671 forced_flags.update(pkgsettings.usemask)
3672 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3673 old_iuse = set(filter_iuse_defaults(
3674 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3675 cur_use = pkg.use.enabled
3676 cur_iuse = pkg.iuse.all
3677 reinstall_for_flags = \
3678 self._reinstall_for_flags(
3679 forced_flags, old_use, old_iuse,
3681 if reinstall_for_flags:
3685 matched_packages.append(pkg)
3686 if reinstall_for_flags:
3687 self._reinstall_nodes[pkg] = \
3691 if not matched_packages:
3694 if "--debug" in self.myopts:
3695 for pkg in matched_packages:
3696 portage.writemsg("%s %s\n" % \
3697 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3699 # Filter out any old-style virtual matches if they are
3700 # mixed with new-style virtual matches.
3701 cp = portage.dep_getkey(atom)
3702 if len(matched_packages) > 1 and \
3703 "virtual" == portage.catsplit(cp)[0]:
3704 for pkg in matched_packages:
3707 # Got a new-style virtual, so filter
3708 # out any old-style virtuals.
3709 matched_packages = [pkg for pkg in matched_packages \
# Keep only the overall best version when several candidates remain.
3713 if len(matched_packages) > 1:
3714 bestmatch = portage.best(
3715 [pkg.cpv for pkg in matched_packages])
3716 matched_packages = [pkg for pkg in matched_packages \
3717 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3719 # ordered by type preference ("ebuild" type is the last resort)
3720 return matched_packages[-1], existing_node
# Graph-restricted package selector used while completing the graph:
# matches only packages already in the graph trees and reports whether the
# chosen package's slot is already occupied by a graph node.
3722 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3724 Select packages that have already been added to the graph or
3725 those that are installed and have not been scheduled for
3728 graph_db = self._graph_trees[root]["porttree"].dbapi
3729 matches = graph_db.match_pkgs(atom)
3732 pkg = matches[-1] # highest match
3733 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
3734 return pkg, in_graph
# Pull deep dependencies of the required sets into the graph so that
# initially-satisfied dependencies are not silently broken by the planned
# upgrades. NOTE(review): this excerpt is flattened and some interior lines
# (early returns, `args` initialization, loop structure around `arg`) are
# elided.
3736 def _complete_graph(self):
3738 Add any deep dependencies of required sets (args, system, world) that
3739 have not been pulled into the graph yet. This ensures that the graph
3740 is consistent such that initially satisfied deep dependencies are not
3741 broken in the new graph. Initially unsatisfied dependencies are
3742 irrelevant since we only want to avoid breaking dependencies that are
3745 Since this method can consume enough time to disturb users, it is
3746 currently only enabled by the --complete-graph option.
3748 if "--buildpkgonly" in self.myopts or \
3749 "recurse" not in self.myparams:
3752 if "complete" not in self.myparams:
3753 # Skip this to avoid consuming enough time to disturb users.
3756 # Put the depgraph into a mode that causes it to only
3757 # select packages that have already been added to the
3758 # graph or those that are installed and have not been
3759 # scheduled for replacement. Also, toggle the "deep"
3760 # parameter so that all dependencies are traversed and
3762 self._select_atoms = self._select_atoms_from_graph
3763 self._select_package = self._select_pkg_from_graph
3764 already_deep = "deep" in self.myparams
3765 if not already_deep:
3766 self.myparams.add("deep")
3768 for root in self.roots:
3769 required_set_names = self._required_set_names.copy()
3770 if root == self.target_root and \
3771 (already_deep or "empty" in self.myparams):
3772 required_set_names.difference_update(self._sets)
3773 if not required_set_names and not self._ignored_deps:
3775 root_config = self.roots[root]
3776 setconfig = root_config.setconfig
3778 # Reuse existing SetArg instances when available.
3779 for arg in self.digraph.root_nodes():
3780 if not isinstance(arg, SetArg):
3782 if arg.root_config != root_config:
3784 if arg.name in required_set_names:
3786 required_set_names.remove(arg.name)
3787 # Create new SetArg instances only when necessary.
3788 for s in required_set_names:
3789 expanded_set = InternalPackageSet(
3790 initial_atoms=setconfig.getSetAtoms(s))
3791 atom = SETPREFIX + s
3792 args.append(SetArg(arg=atom, set=expanded_set,
3793 root_config=root_config))
3794 vardb = root_config.trees["vartree"].dbapi
# Seed the dep stack with every atom of every required set, plus any
# previously ignored deps, then re-run graph creation.
3796 for atom in arg.set:
3797 self._dep_stack.append(
3798 Dependency(atom=atom, root=root, parent=arg))
3799 if self._ignored_deps:
3800 self._dep_stack.extend(self._ignored_deps)
3801 self._ignored_deps = []
3802 if not self._create_graph(allow_unsatisfied=True):
3804 # Check the unsatisfied deps to see if any initially satisfied deps
3805 # will become unsatisfied due to an upgrade. Initially unsatisfied
3806 # deps are irrelevant since we only want to avoid breaking deps
3807 # that are initially satisfied.
3808 while self._unsatisfied_deps:
3809 dep = self._unsatisfied_deps.pop()
3810 matches = vardb.match_pkgs(dep.atom)
3812 self._initially_unsatisfied_deps.append(dep)
3814 # A scheduled installation broke a deep dependency.
3815 # Add the installed package to the graph so that it
3816 # will be appropriately reported as a slot collision
3817 # (possibly solvable via backtracking).
3818 pkg = matches[-1] # highest match
3819 if not self._add_pkg(pkg, dep):
3821 if not self._create_graph(allow_unsatisfied=True):
# Fetch-or-create a Package instance keyed by (type_name, root, cpv,
# operation) in self._pkg_cache. For ebuilds, USE and CHOST are computed
# from the root's settings before caching. NOTE(review): the cache-hit
# early return and try/except around aux_get appear to be elided from this
# excerpt.
3825 def _pkg(self, cpv, type_name, root_config, installed=False):
3827 Get a package instance from the cache, or create a new
3828 one if necessary. Raises KeyError from aux_get if it
3829 fails for some reason (package does not exist or is
3834 operation = "nomerge"
3835 pkg = self._pkg_cache.get(
3836 (type_name, root_config.root, cpv, operation))
3838 tree_type = self.pkg_tree_map[type_name]
3839 db = root_config.trees[tree_type].dbapi
# Use the original (non-fake) trees' aux-cache keys for metadata lookup.
3840 db_keys = list(self._trees_orig[root_config.root][
3841 tree_type].dbapi._aux_cache_keys)
3843 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3845 raise portage.exception.PackageNotFound(cpv)
3846 pkg = Package(cpv=cpv, metadata=metadata,
3847 root_config=root_config, installed=installed)
3848 if type_name == "ebuild":
3849 settings = self.pkgsettings[root_config.root]
3850 settings.setcpv(pkg)
3851 pkg.metadata["USE"] = settings["PORTAGE_USE"]
3852 pkg.metadata['CHOST'] = settings.get('CHOST', '')
3853 self._pkg_cache[pkg] = pkg
# Validate every blocker atom against the initial (installed) and final
# (post-merge) package databases, drop irrelevant blockers, schedule
# uninstall tasks with hard ordering deps where a block can be resolved by
# unmerging, and record unsolvable blockers. NOTE(review): this excerpt is
# flattened and many interior lines (loop headers, returns, try/finally,
# several assignments) are elided; comments are hedged accordingly.
3856 def validate_blockers(self):
3857 """Remove any blockers from the digraph that do not match any of the
3858 packages within the graph. If necessary, create hard deps to ensure
3859 correct merge order such that mutually blocking packages are never
3860 installed simultaneously."""
3862 if "--buildpkgonly" in self.myopts or \
3863 "--nodeps" in self.myopts:
3866 #if "deep" in self.myparams:
3868 # Pull in blockers from all installed packages that haven't already
3869 # been pulled into the depgraph. This is not enabled by default
3870 # due to the performance penalty that is incurred by all the
3871 # additional dep_check calls that are required.
3873 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3874 for myroot in self.trees:
3875 vardb = self.trees[myroot]["vartree"].dbapi
3876 portdb = self.trees[myroot]["porttree"].dbapi
3877 pkgsettings = self.pkgsettings[myroot]
3878 final_db = self.mydbapi[myroot]
# Persistent per-root cache of blocker atoms keyed by cpv, validated
# against the package COUNTER to detect reinstalls.
3880 blocker_cache = BlockerCache(myroot, vardb)
3881 stale_cache = set(blocker_cache)
3884 stale_cache.discard(cpv)
3885 pkg_in_graph = self.digraph.contains(pkg)
3887 # Check for masked installed packages. Only warn about
3888 # packages that are in the graph in order to avoid warning
3889 # about those that will be automatically uninstalled during
3890 # the merge process or by --depclean.
3892 if pkg_in_graph and not visible(pkgsettings, pkg):
3893 self._masked_installed.add(pkg)
3895 blocker_atoms = None
3901 self._blocker_parents.child_nodes(pkg))
3906 self._irrelevant_blockers.child_nodes(pkg))
3909 if blockers is not None:
3910 blockers = set(str(blocker.atom) \
3911 for blocker in blockers)
3913 # If this node has any blockers, create a "nomerge"
3914 # node for it so that they can be enforced.
3915 self.spinner.update()
3916 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the package was reinstalled since the cache
# entry was written, so the entry is invalid.
3917 if blocker_data is not None and \
3918 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3921 # If blocker data from the graph is available, use
3922 # it to validate the cache and update the cache if
3924 if blocker_data is not None and \
3925 blockers is not None:
3926 if not blockers.symmetric_difference(
3927 blocker_data.atoms):
3931 if blocker_data is None and \
3932 blockers is not None:
3933 # Re-use the blockers from the graph.
3934 blocker_atoms = sorted(blockers)
3935 counter = long(pkg.metadata["COUNTER"])
3937 blocker_cache.BlockerData(counter, blocker_atoms)
3938 blocker_cache[pkg.cpv] = blocker_data
3942 blocker_atoms = blocker_data.atoms
3944 # Use aux_get() to trigger FakeVartree global
3945 # updates on *DEPEND when appropriate.
3946 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3947 # It is crucial to pass in final_db here in order to
3948 # optimize dep_check calls by eliminating atoms via
3949 # dep_wordreduce and dep_eval calls.
3951 portage.dep._dep_check_strict = False
3953 success, atoms = portage.dep_check(depstr,
3954 final_db, pkgsettings, myuse=pkg.use.enabled,
3955 trees=self._graph_trees, myroot=myroot)
3956 except Exception, e:
3957 if isinstance(e, SystemExit):
3959 # This is helpful, for example, if a ValueError
3960 # is thrown from cpv_expand due to multiple
3961 # matches (this can happen if an atom lacks a
3963 show_invalid_depstring_notice(
3964 pkg, depstr, str(e))
3968 portage.dep._dep_check_strict = True
# An invalid depstring is tolerated when the package is about to be
# replaced anyway.
3970 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3971 if replacement_pkg and \
3972 replacement_pkg[0].operation == "merge":
3973 # This package is being replaced anyway, so
3974 # ignore invalid dependencies so as not to
3975 # annoy the user too much (otherwise they'd be
3976 # forced to manually unmerge it first).
3978 show_invalid_depstring_notice(pkg, depstr, atoms)
3980 blocker_atoms = [myatom for myatom in atoms \
3981 if myatom.startswith("!")]
3982 blocker_atoms.sort()
3983 counter = long(pkg.metadata["COUNTER"])
3984 blocker_cache[cpv] = \
3985 blocker_cache.BlockerData(counter, blocker_atoms)
3988 for atom in blocker_atoms:
3989 blocker = Blocker(atom=portage.dep.Atom(atom),
3990 eapi=pkg.metadata["EAPI"], root=myroot)
3991 self._blocker_parents.add(blocker, pkg)
3992 except portage.exception.InvalidAtom, e:
3993 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3994 show_invalid_depstring_notice(
3995 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed.
3997 for cpv in stale_cache:
3998 del blocker_cache[cpv]
3999 blocker_cache.flush()
4002 # Discard any "uninstall" tasks scheduled by previous calls
4003 # to this method, since those tasks may not make sense given
4004 # the current graph state.
4005 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
4006 if previous_uninstall_tasks:
4007 self._blocker_uninstalls = digraph()
4008 self.digraph.difference_update(previous_uninstall_tasks)
4010 for blocker in self._blocker_parents.leaf_nodes():
4011 self.spinner.update()
4012 root_config = self.roots[blocker.root]
4013 virtuals = root_config.settings.getvirtuals()
4014 myroot = blocker.root
4015 initial_db = self.trees[myroot]["vartree"].dbapi
4016 final_db = self.mydbapi[myroot]
4018 provider_virtual = False
4019 if blocker.cp in virtuals and \
4020 not self._have_new_virt(blocker.root, blocker.cp):
4021 provider_virtual = True
4023 # Use this to check PROVIDE for each matched package
4025 atom_set = InternalPackageSet(
4026 initial_atoms=[blocker.atom])
# Old-style virtual blockers are expanded to one atom per provider.
4028 if provider_virtual:
4030 for provider_entry in virtuals[blocker.cp]:
4032 portage.dep_getkey(provider_entry)
4033 atoms.append(blocker.atom.replace(
4034 blocker.cp, provider_cp))
4036 atoms = [blocker.atom]
4038 blocked_initial = set()
4040 for pkg in initial_db.match_pkgs(atom):
4041 if atom_set.findAtomForPackage(pkg):
4042 blocked_initial.add(pkg)
4044 blocked_final = set()
4046 for pkg in final_db.match_pkgs(atom):
4047 if atom_set.findAtomForPackage(pkg):
4048 blocked_final.add(pkg)
4050 if not blocked_initial and not blocked_final:
4051 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
4052 self._blocker_parents.remove(blocker)
4053 # Discard any parents that don't have any more blockers.
4054 for pkg in parent_pkgs:
4055 self._irrelevant_blockers.add(blocker, pkg)
4056 if not self._blocker_parents.child_nodes(pkg):
4057 self._blocker_parents.remove(pkg)
4059 for parent in self._blocker_parents.parent_nodes(blocker):
4060 unresolved_blocks = False
4061 depends_on_order = set()
4062 for pkg in blocked_initial:
4063 if pkg.slot_atom == parent.slot_atom:
4064 # TODO: Support blocks within slots in cases where it
4065 # might make sense. For example, a new version might
4066 # require that the old version be uninstalled at build
4069 if parent.installed:
4070 # Two currently installed packages conflict with
4071 # eachother. Ignore this case since the damage
4072 # is already done and this would be likely to
4073 # confuse users if displayed like a normal blocker.
4076 self._blocked_pkgs.add(pkg, blocker)
4078 if parent.operation == "merge":
4079 # Maybe the blocked package can be replaced or simply
4080 # unmerged to resolve this block.
4081 depends_on_order.add((pkg, parent))
4083 # None of the above blocker resolutions techniques apply,
4084 # so apparently this one is unresolvable.
4085 unresolved_blocks = True
4086 for pkg in blocked_final:
4087 if pkg.slot_atom == parent.slot_atom:
4088 # TODO: Support blocks within slots.
4090 if parent.operation == "nomerge" and \
4091 pkg.operation == "nomerge":
4092 # This blocker will be handled the next time that a
4093 # merge of either package is triggered.
4096 self._blocked_pkgs.add(pkg, blocker)
4098 # Maybe the blocking package can be
4099 # unmerged to resolve this block.
4100 if parent.operation == "merge" and pkg.installed:
4101 depends_on_order.add((pkg, parent))
4103 elif parent.operation == "nomerge":
4104 depends_on_order.add((parent, pkg))
4106 # None of the above blocker resolutions techniques apply,
4107 # so apparently this one is unresolvable.
4108 unresolved_blocks = True
4110 # Make sure we don't unmerge any package that have been pulled
4112 if not unresolved_blocks and depends_on_order:
4113 for inst_pkg, inst_task in depends_on_order:
4114 if self.digraph.contains(inst_pkg) and \
4115 self.digraph.parent_nodes(inst_pkg):
4116 unresolved_blocks = True
4119 if not unresolved_blocks and depends_on_order:
4120 for inst_pkg, inst_task in depends_on_order:
# Build an explicit uninstall task for the blocked installed package and
# order it before the merge with a hard dependency edge.
4121 uninst_task = Package(built=inst_pkg.built,
4122 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4123 metadata=inst_pkg.metadata,
4124 operation="uninstall",
4125 root_config=inst_pkg.root_config,
4126 type_name=inst_pkg.type_name)
4127 self._pkg_cache[uninst_task] = uninst_task
4128 # Enforce correct merge order with a hard dep.
4129 self.digraph.addnode(uninst_task, inst_task,
4130 priority=BlockerDepPriority.instance)
4131 # Count references to this blocker so that it can be
4132 # invalidated after nodes referencing it have been
4134 self._blocker_uninstalls.addnode(uninst_task, blocker)
4135 if not unresolved_blocks and not depends_on_order:
4136 self._irrelevant_blockers.add(blocker, parent)
4137 self._blocker_parents.remove_edge(blocker, parent)
4138 if not self._blocker_parents.parent_nodes(blocker):
4139 self._blocker_parents.remove(blocker)
4140 if not self._blocker_parents.child_nodes(parent):
4141 self._blocker_parents.remove(parent)
4142 if unresolved_blocks:
4143 self._unsolvable_blockers.add(blocker, parent)
# Return whether blocker conflicts are tolerable for this run: options like
# --fetchonly never actually merge, so blocks can be accepted.
# NOTE(review): the accumulator initialization and return statement are
# elided from this excerpt.
4147 def _accept_blocker_conflicts(self):
4149 for x in ("--buildpkgonly", "--fetchonly",
4150 "--fetch-all-uri", "--nodeps"):
4151 if x in self.myopts:
# Bias merge ordering: uninstall tasks first, then deep system runtime
# deps, then by descending parent (reference) count. NOTE(review): the
# node_info initialization and the comparator's return statements for the
# uninstall/system branches are elided from this excerpt.
4156 def _merge_order_bias(self, mygraph):
4158 For optimal leaf node selection, promote deep system runtime deps and
4159 order nodes from highest to lowest overall reference count.
4163 for node in mygraph.order:
4164 node_info[node] = len(mygraph.parent_nodes(node))
4165 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
4167 def cmp_merge_preference(node1, node2):
4169 if node1.operation == 'uninstall':
4170 if node2.operation == 'uninstall':
4174 if node2.operation == 'uninstall':
4175 if node1.operation == 'uninstall':
4179 node1_sys = node1 in deep_system_deps
4180 node2_sys = node2 in deep_system_deps
4181 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
4186 return node_info[node2] - node_info[node1]
4188 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized task list, computing and caching it on
# first use; retries serialization after conflict resolution when a
# _serialize_tasks_retry is raised. NOTE(review): the parameter `reversed`
# shadows the builtin of the same name — kept for interface compatibility;
# the branch that uses it is elided from this excerpt.
4190 def altlist(self, reversed=False):
4192 while self._serialized_tasks_cache is None:
4193 self._resolve_conflicts()
4195 self._serialized_tasks_cache, self._scheduler_graph = \
4196 self._serialize_tasks()
4197 except self._serialize_tasks_retry:
# Hand callers a copy so they cannot mutate the cache.
4200 retlist = self._serialized_tasks_cache[:]
4205 def schedulerGraph(self):
4207 The scheduler graph is identical to the normal one except that
4208 uninstall edges are reversed in specific cases that require
4209 conflicting packages to be temporarily installed simultaneously.
4210 This is intended for use by the Scheduler in it's parallelization
4211 logic. It ensures that temporary simultaneous installation of
4212 conflicting packages is avoided when appropriate (especially for
4213 !!atom blockers), but allowed in specific cases that require it.
4215 Note that this method calls break_refs() which alters the state of
4216 internal Package instances such that this depgraph instance should
4217 not be used to perform any more calculations.
# Lazily populate the graph (the populating call is elided from this
# excerpt), then sever back-references before handing it out.
4219 if self._scheduler_graph is None:
4221 self.break_refs(self._scheduler_graph.order)
4222 return self._scheduler_graph
def break_refs(self, nodes):
    """
    Take a mergelist like that returned from self.altlist() and
    break any references that lead back to the depgraph. This is
    useful if you want to hold references to packages without
    also holding the depgraph on the heap.
    """
    # NOTE(review): the loop header iterating over ``nodes`` appears
    # to be elided from this excerpt.
    if hasattr(node, "root_config"):
        # The FakeVartree references the _package_cache which
        # references the depgraph. So that Package instances don't
        # hold the depgraph and FakeVartree on the heap, replace
        # the RootConfig that references the FakeVartree with the
        # original RootConfig instance which references the actual
        node.root_config = \
            self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """
    Ensure the dependency graph is complete and blocker-consistent
    before task serialization, then resolve any recorded slot
    collisions.

    Raises the internal-error exception if graph completion or
    blocker validation fails.
    """
    # Run both checks lazily and in order: graph completion first,
    # then blocker validation.  Either failure aborts through the
    # same internal-error path.
    for check in (self._complete_graph, self.validate_blockers):
        if not check():
            raise self._unknown_internal_error()

    if self._slot_collision_info:
        self._process_slot_conflicts()
def _serialize_tasks(self):
    """
    Topologically serialize the dependency digraph into an ordered
    task list (merges, uninstalls and unresolved Blocker markers),
    returning (retlist, scheduler_graph).  Raises
    _serialize_tasks_retry when the graph must be completed and
    serialization re-attempted, or _unknown_internal_error on
    unresolvable blocker / slot-collision conflicts.

    NOTE(review): many lines are elided from this excerpt (loop
    headers, ``try:`` lines, ``break``/``continue`` statements and
    several variable initializers such as ``retlist``, ``asap_nodes``,
    ``prefer_asap`` and ``uninst_task``).  All such gaps are flagged
    below; confirm against the full source file.
    """
    if "--debug" in self.myopts:
        writemsg("\ndigraph:\n\n", noiselevel=-1)
        self.digraph.debug_print()
        writemsg("\n", noiselevel=-1)

    scheduler_graph = self.digraph.copy()

    if '--nodeps' in self.myopts:
        # Preserve the package order given on the command line.
        return ([node for node in scheduler_graph \
            if isinstance(node, Package) \
            and node.operation == 'merge'], scheduler_graph)

    mygraph=self.digraph.copy()
    # Prune "nomerge" root nodes if nothing depends on them, since
    # otherwise they slow down merge order calculation. Don't remove
    # non-root nodes since they help optimize merge order in some cases
    # such as revdep-rebuild.
    removed_nodes = set()
    # NOTE(review): the enclosing loop header (presumably
    # ``while True:``) is elided here.
    for node in mygraph.root_nodes():
        if not isinstance(node, Package) or \
            node.installed or node.onlydeps:
            removed_nodes.add(node)
    self.spinner.update()
    mygraph.difference_update(removed_nodes)
    if not removed_nodes:
        # NOTE(review): loop-exit statement elided.
    removed_nodes.clear()
    self._merge_order_bias(mygraph)
    def cmp_circular_bias(n1, n2):
        """
        RDEPEND is stronger than PDEPEND and this function
        measures such a strength bias within a circular
        dependency relationship.
        """
        n1_n2_medium = n2 in mygraph.child_nodes(n1,
            ignore_priority=priority_range.ignore_medium_soft)
        n2_n1_medium = n1 in mygraph.child_nodes(n2,
            ignore_priority=priority_range.ignore_medium_soft)
        if n1_n2_medium == n2_n1_medium:
            # NOTE(review): return statements of this comparator are
            # elided in this excerpt.
    myblocker_uninstalls = self._blocker_uninstalls.copy()
    # NOTE(review): ``retlist`` initializer elided here.
    # Contains uninstall tasks that have been scheduled to
    # occur after overlapping blockers have been installed.
    scheduled_uninstalls = set()
    # Contains any Uninstall tasks that have been ignored
    # in order to avoid the circular deps code path. These
    # correspond to blocker conflicts that could not be
    ignored_uninstall_tasks = set()
    have_uninstall_task = False
    complete = "complete" in self.myparams
    # NOTE(review): ``asap_nodes`` initializer elided here (it is
    # appended to and iterated below).

    def get_nodes(**kwargs):
        """
        Returns leaf nodes excluding Uninstall instances
        since those should be executed as late as possible.
        """
        return [node for node in mygraph.leaf_nodes(**kwargs) \
            if isinstance(node, Package) and \
                (node.operation != "uninstall" or \
                node in scheduled_uninstalls)]

    # sys-apps/portage needs special treatment if ROOT="/"
    running_root = self._running_root.root
    from portage.const import PORTAGE_PACKAGE_ATOM
    runtime_deps = InternalPackageSet(
        initial_atoms=[PORTAGE_PACKAGE_ATOM])
    running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
        PORTAGE_PACKAGE_ATOM)
    replacement_portage = self.mydbapi[running_root].match_pkgs(
        PORTAGE_PACKAGE_ATOM)

    # NOTE(review): the if/else around the next two assignments
    # (reduce match list to a single Package or None) is elided.
    running_portage = running_portage[0]
    running_portage = None

    if replacement_portage:
        replacement_portage = replacement_portage[0]
        # NOTE(review): ``else:`` branch header elided before the
        # next line.
        replacement_portage = None

    if replacement_portage == running_portage:
        replacement_portage = None

    if replacement_portage is not None:
        # update from running_portage to replacement_portage asap
        asap_nodes.append(replacement_portage)

    if running_portage is not None:
        # NOTE(review): ``try:`` line elided before this call.
        portage_rdepend = self._select_atoms_highest_available(
            running_root, running_portage.metadata["RDEPEND"],
            myuse=running_portage.use.enabled,
            parent=running_portage, strict=False)
        except portage.exception.InvalidDependString, e:
            portage.writemsg("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (running_root, running_portage.cpv, e), noiselevel=-1)
            portage_rdepend = []
        runtime_deps.update(atom for atom in portage_rdepend \
            if not atom.startswith("!"))

    def gather_deps(ignore_priority, mergeable_nodes,
        selected_nodes, node):
        """
        Recursively gather a group of nodes that RDEPEND on
        eachother. This ensures that they are merged as a group
        and get their RDEPENDs satisfied as soon as possible.
        """
        # NOTE(review): the early-return statements of the three
        # guard conditions below are elided in this excerpt.
        if node in selected_nodes:
        if node not in mergeable_nodes:
        if node == replacement_portage and \
            mygraph.child_nodes(node,
                ignore_priority=priority_range.ignore_medium_soft):
            # Make sure that portage always has all of it's
            # RDEPENDs installed first.
        selected_nodes.add(node)
        for child in mygraph.child_nodes(node,
            ignore_priority=ignore_priority):
            if not gather_deps(ignore_priority,
                mergeable_nodes, selected_nodes, child):

    def ignore_uninst_or_med(priority):
        # Treat blocker-priority edges as ignorable in addition to
        # whatever ignore_medium ignores.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium(priority)

    def ignore_uninst_or_med_soft(priority):
        # Soft variant of ignore_uninst_or_med.
        if priority is BlockerDepPriority.instance:
        return priority_range.ignore_medium_soft(priority)

    tree_mode = "--tree" in self.myopts
    # Tracks whether or not the current iteration should prefer asap_nodes
    # if available. This is set to False when the previous iteration
    # failed to select any nodes. It is reset whenever nodes are
    # successfully selected.
    # NOTE(review): ``prefer_asap`` initializer elided here.

    # Controls whether or not the current iteration should drop edges that
    # are "satisfied" by installed packages, in order to solve circular
    # dependencies. The deep runtime dependencies of installed packages are
    # not checked in this case (bug #199856), so it must be avoided
    # whenever possible.
    drop_satisfied = False

    # State of variables for successive iterations that loosen the
    # criteria for node selection.
    #
    # iteration prefer_asap drop_satisfied

    # If no nodes are selected on the last iteration, it is due to
    # unresolved blockers or circular dependencies.

    while not mygraph.empty():
        self.spinner.update()
        selected_nodes = None
        ignore_priority = None
        if drop_satisfied or (prefer_asap and asap_nodes):
            priority_range = DepPrioritySatisfiedRange
            # NOTE(review): ``else:`` header elided before next line.
            priority_range = DepPriorityNormalRange
        if prefer_asap and asap_nodes:
            # ASAP nodes are merged before their soft deps. Go ahead and
            # select root nodes here if necessary, since it's typical for
            # the parent to have been removed from the graph already.
            asap_nodes = [node for node in asap_nodes \
                if mygraph.contains(node)]
            for node in asap_nodes:
                if not mygraph.child_nodes(node,
                    ignore_priority=priority_range.ignore_soft):
                    selected_nodes = [node]
                    asap_nodes.remove(node)

        if not selected_nodes and \
            not (prefer_asap and asap_nodes):
            for i in xrange(priority_range.NONE,
                priority_range.MEDIUM_SOFT + 1):
                ignore_priority = priority_range.ignore_priority[i]
                nodes = get_nodes(ignore_priority=ignore_priority)
                # NOTE(review): ``if nodes:`` guard elided here.
                    # If there is a mix of uninstall nodes with other
                    # types, save the uninstall nodes for later since
                    # sometimes a merge node will render an uninstall
                    # node unnecessary (due to occupying the same slot),
                    # and we want to avoid executing a separate uninstall
                    # task in that case.
                    good_uninstalls = []
                    with_some_uninstalls_excluded = []
                    # NOTE(review): ``for node in nodes:`` header elided.
                        if node.operation == "uninstall":
                            slot_node = self.mydbapi[node.root
                                ].match_pkgs(node.slot_atom)
                            # NOTE(review): condition head elided before
                            # the next continuation line.
                                slot_node[0].operation == "merge":
                            good_uninstalls.append(node)
                        with_some_uninstalls_excluded.append(node)
                    # NOTE(review): ``if good_uninstalls:`` elided.
                        nodes = good_uninstalls
                    elif with_some_uninstalls_excluded:
                        nodes = with_some_uninstalls_excluded

                    if ignore_priority is None and not tree_mode:
                        # Greedily pop all of these nodes since no
                        # relationship has been ignored. This optimization
                        # destroys --tree output, so it's disabled in tree
                        selected_nodes = nodes
                        # For optimal merge order:
                        #  * Only pop one node.
                        #  * Removing a root node (node without a parent)
                        #    will not produce a leaf node, so avoid it.
                        #  * It's normal for a selected uninstall to be a
                        #    root node, so don't check them for parents.
                        # NOTE(review): loop header elided here.
                            if node.operation == "uninstall" or \
                                mygraph.parent_nodes(node):
                                selected_nodes = [node]

        if not selected_nodes:
            nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
            # NOTE(review): ``if nodes:`` guard elided here.
                mergeable_nodes = set(nodes)
                if prefer_asap and asap_nodes:
                for i in xrange(priority_range.SOFT,
                    priority_range.MEDIUM_SOFT + 1):
                    ignore_priority = priority_range.ignore_priority[i]
                    # NOTE(review): inner ``for node in nodes:`` elided.
                        if not mygraph.parent_nodes(node):
                        selected_nodes = set()
                        if gather_deps(ignore_priority,
                            mergeable_nodes, selected_nodes, node):
                            selected_nodes = None

        if prefer_asap and asap_nodes and not selected_nodes:
            # We failed to find any asap nodes to merge, so ignore
            # them for the next iteration.

        if selected_nodes and ignore_priority is not None:
            # Try to merge ignored medium_soft deps as soon as possible
            # if they're not satisfied by installed packages.
            for node in selected_nodes:
                children = set(mygraph.child_nodes(node))
                soft = children.difference(
                    mygraph.child_nodes(node,
                        ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
                medium_soft = children.difference(
                    mygraph.child_nodes(node,
                        DepPrioritySatisfiedRange.ignore_medium_soft))
                medium_soft.difference_update(soft)
                for child in medium_soft:
                    if child in selected_nodes:
                    if child in asap_nodes:
                    asap_nodes.append(child)

        if selected_nodes and len(selected_nodes) > 1:
            if not isinstance(selected_nodes, list):
                selected_nodes = list(selected_nodes)
            selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # An Uninstall task needs to be executed in order to
            # avoid conflict if possible.
            priority_range = DepPrioritySatisfiedRange
            # NOTE(review): ``else:`` header elided before next line.
            priority_range = DepPriorityNormalRange

            mergeable_nodes = get_nodes(
                ignore_priority=ignore_uninst_or_med)

            min_parent_deps = None
            # NOTE(review): ``uninst_task = None`` initializer elided.
            for task in myblocker_uninstalls.leaf_nodes():
                # Do some sanity checks so that system or world packages
                # don't get uninstalled inappropriately here (only really
                # necessary when --complete-graph has not been enabled).

                if task in ignored_uninstall_tasks:

                if task in scheduled_uninstalls:
                    # It's been scheduled but it hasn't
                    # been executed yet due to dependence
                    # on installation of blocking packages.

                root_config = self.roots[task.root]
                inst_pkg = self._pkg_cache[
                    ("installed", task.root, task.cpv, "nomerge")]

                if self.digraph.contains(inst_pkg):

                forbid_overlap = False
                heuristic_overlap = False
                for blocker in myblocker_uninstalls.parent_nodes(task):
                    if blocker.eapi in ("0", "1"):
                        heuristic_overlap = True
                    elif blocker.atom.blocker.overlap.forbid:
                        forbid_overlap = True
                if forbid_overlap and running_root == task.root:

                if heuristic_overlap and running_root == task.root:
                    # Never uninstall sys-apps/portage or it's essential
                    # dependencies, except through replacement.
                    runtime_dep_atoms = \
                        list(runtime_deps.iterAtomsForPackage(task))
                    except portage.exception.InvalidDependString, e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                    # Don't uninstall a runtime dep if it appears
                    # to be the only suitable one installed.
                    vardb = root_config.trees["vartree"].dbapi
                    for atom in runtime_dep_atoms:
                        other_version = None
                        for pkg in vardb.match_pkgs(atom):
                            if pkg.cpv == task.cpv and \
                                pkg.metadata["COUNTER"] == \
                                task.metadata["COUNTER"]:
                        if other_version is None:

                    # For packages in the system set, don't take
                    # any chances. If the conflict can't be resolved
                    # by a normal replacement operation then abort.
                    for atom in root_config.sets[
                        "system"].iterAtomsForPackage(task):
                    except portage.exception.InvalidDependString, e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                # Note that the world check isn't always
                # necessary since self._complete_graph() will
                # add all packages from the system and world sets to the
                # graph. This just allows unresolved conflicts to be
                # detected as early as possible, which makes it possible
                # to avoid calling self._complete_graph() when it is
                # unnecessary due to blockers triggering an abortion.

                # For packages in the world set, go ahead an uninstall
                # when necessary, as long as the atom will be satisfied
                # in the final state.
                graph_db = self.mydbapi[task.root]
                    for atom in root_config.sets[
                        "world"].iterAtomsForPackage(task):
                        for pkg in graph_db.match_pkgs(atom):
                            self._blocked_world_pkgs[inst_pkg] = atom
                    except portage.exception.InvalidDependString, e:
                        portage.writemsg("!!! Invalid PROVIDE in " + \
                            "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
                            (task.root, task.cpv, e), noiselevel=-1)

                # Check the deps of parent nodes to ensure that
                # the chosen task produces a leaf node. Maybe
                # this can be optimized some more to make the
                # best possible choice, but the current algorithm
                # is simple and should be near optimal for most
                mergeable_parent = False
                # NOTE(review): ``parent_deps = set()`` elided here.
                for parent in mygraph.parent_nodes(task):
                    parent_deps.update(mygraph.child_nodes(parent,
                        ignore_priority=priority_range.ignore_medium_soft))
                    if parent in mergeable_nodes and \
                        gather_deps(ignore_uninst_or_med_soft,
                            mergeable_nodes, set(), parent):
                        mergeable_parent = True

                if not mergeable_parent:

                parent_deps.remove(task)
                if min_parent_deps is None or \
                    len(parent_deps) < min_parent_deps:
                    min_parent_deps = len(parent_deps)

            if uninst_task is not None:
                # The uninstall is performed only after blocking
                # packages have been merged on top of it. File
                # collisions between blocking packages are detected
                # and removed from the list of files to be uninstalled.
                scheduled_uninstalls.add(uninst_task)
                parent_nodes = mygraph.parent_nodes(uninst_task)

                # Reverse the parent -> uninstall edges since we want
                # to do the uninstall after blocking packages have
                # been merged on top of it.
                mygraph.remove(uninst_task)
                for blocked_pkg in parent_nodes:
                    mygraph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)
                    scheduler_graph.remove_edge(uninst_task, blocked_pkg)
                    scheduler_graph.add(blocked_pkg, uninst_task,
                        priority=BlockerDepPriority.instance)

                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            # Only select root nodes as a last resort. This case should
            # only trigger when the graph is nearly empty and the only
            # remaining nodes are isolated (no parents or children). Since
            # the nodes must be isolated, ignore_priority is not needed.
            selected_nodes = get_nodes()

        if not selected_nodes and not drop_satisfied:
            drop_satisfied = True

        if not selected_nodes and not myblocker_uninstalls.is_empty():
            # If possible, drop an uninstall task here in order to avoid
            # the circular deps code path. The corresponding blocker will
            # still be counted as an unresolved conflict.
            for node in myblocker_uninstalls.leaf_nodes():
                # NOTE(review): ``try:``/except around removal elided.
                mygraph.remove(node)
                ignored_uninstall_tasks.add(node)

            if uninst_task is not None:
                # Reset the state variables for leaf node selection and
                # continue trying to select leaf nodes.
                drop_satisfied = False

        if not selected_nodes:
            self._circular_deps_for_display = mygraph
            raise self._unknown_internal_error()

        # At this point, we've succeeded in selecting one or more nodes, so
        # reset state variables for leaf node selection.
        drop_satisfied = False

        mygraph.difference_update(selected_nodes)

        for node in selected_nodes:
            if isinstance(node, Package) and \
                node.operation == "nomerge":

            # Handle interactions between blockers
            # and uninstallation tasks.
            solved_blockers = set()
            # NOTE(review): ``uninst_task = None`` reset elided here.
            if isinstance(node, Package) and \
                "uninstall" == node.operation:
                have_uninstall_task = True
                vardb = self.trees[node.root]["vartree"].dbapi
                previous_cpv = vardb.match(node.slot_atom)
                    # The package will be replaced by this one, so remove
                    # the corresponding Uninstall task if necessary.
                    previous_cpv = previous_cpv[0]
                        ("installed", node.root, previous_cpv, "uninstall")
                        mygraph.remove(uninst_task)

            if uninst_task is not None and \
                uninst_task not in ignored_uninstall_tasks and \
                myblocker_uninstalls.contains(uninst_task):
                blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
                myblocker_uninstalls.remove(uninst_task)
                # Discard any blockers that this Uninstall solves.
                for blocker in blocker_nodes:
                    if not myblocker_uninstalls.child_nodes(blocker):
                        myblocker_uninstalls.remove(blocker)
                        solved_blockers.add(blocker)

            retlist.append(node)

            if (isinstance(node, Package) and \
                "uninstall" == node.operation) or \
                (uninst_task is not None and \
                uninst_task in scheduled_uninstalls):
                # Include satisfied blockers in the merge list
                # since the user might be interested and also
                # it serves as an indicator that blocking packages
                # will be temporarily installed simultaneously.
                for blocker in solved_blockers:
                    retlist.append(Blocker(atom=blocker.atom,
                        root=blocker.root, eapi=blocker.eapi,

    unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
    for node in myblocker_uninstalls.root_nodes():
        unsolvable_blockers.add(node)

    for blocker in unsolvable_blockers:
        retlist.append(blocker)

    # If any Uninstall tasks need to be executed in order
    # to avoid a conflict, complete the graph with any
    # dependencies that may have been initially
    # neglected (to ensure that unsafe Uninstall tasks
    # are properly identified and blocked from execution).
    if have_uninstall_task and \
        not unsolvable_blockers:
        self.myparams.add("complete")
        raise self._serialize_tasks_retry("")

    if unsolvable_blockers and \
        not self._accept_blocker_conflicts():
        self._unsatisfied_blockers_for_display = unsolvable_blockers
        self._serialized_tasks_cache = retlist[:]
        self._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    if self._slot_collision_info and \
        not self._accept_blocker_conflicts():
        self._serialized_tasks_cache = retlist[:]
        self._scheduler_graph = scheduler_graph
        raise self._unknown_internal_error()

    return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Display a circular-dependency error to the user: prune acyclic
    root nodes to reduce noise, then show the remaining cycle with
    forced --tree output plus a hint about USE flags.
    """
    # No leaf nodes are available, so we have a circular
    # dependency panic situation. Reduce the noise level to a
    # minimum via repeated elimination of root nodes since they
    # have no parents and thus can not be part of a cycle.
    # NOTE(review): the pruning-loop header and its exit condition
    # appear to be elided from this excerpt.
    root_nodes = mygraph.root_nodes(
        ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
    mygraph.difference_update(root_nodes)
    # Display the USE flags that are enabled on nodes that are part
    # of dependency cycles in case that helps the user decide to
    # disable some of them.
    # NOTE(review): ``display_order`` initializer elided here.
    tempgraph = mygraph.copy()
    while not tempgraph.empty():
        nodes = tempgraph.leaf_nodes()
        # NOTE(review): the if/else choosing between ``nodes`` and
        # ``tempgraph.order[0]`` is partially elided.
        node = tempgraph.order[0]
        display_order.append(node)
        tempgraph.remove(node)
    display_order.reverse()
    # Force a full --tree display regardless of the user's options.
    self.myopts.pop("--quiet", None)
    self.myopts.pop("--verbose", None)
    self.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(display_order)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    # NOTE(review): the ``noiselevel=-1)`` continuation of the next
    # call appears to be elided.
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)
    mygraph.debug_print()
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Note that circular dependencies " + \
        "can often be avoided by temporarily\n", noiselevel=-1)
    portage.writemsg(prefix + "disabling USE flags that trigger " + \
        "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """
    Display the cached serialized task list, unless an identical
    list (or its exact reverse) has already been displayed.
    """
    tasks = self._serialized_tasks_cache
    if tasks is None:
        return

    shown = self._displayed_list
    already_shown = bool(shown) and \
        (shown == tasks or shown == list(reversed(tasks)))
    if already_shown:
        return

    display_list = tasks[:]
    # --tree output is rendered bottom-up, so flip the order.
    if "--tree" in self.myopts:
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Report unresolvable blocker conflicts: show the merge list, then
    the conflicting packages together with the parent packages/atoms
    that pulled each one in (pruned to reduce noise).
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    from textwrap import wrap
    portage.writemsg("\n", noiselevel=-1)
    for line in wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    # NOTE(review): the ``conflict_pkgs`` initializer appears to be
    # elided from this excerpt.
    for blocker in blockers:
        for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
            self._blocker_parents.parent_nodes(blocker)):
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
                atom = self._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@world", atom)])
            # NOTE(review): a guard (presumably ``if parent_atoms:``)
            # is elided before the next line.
            conflict_pkgs[pkg] = parent_atoms

        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        # NOTE(review): ``pruned_pkgs`` initializer elided here.
        for pkg, parent_atoms in conflict_pkgs.iteritems():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

    # Max number of parents shown, to avoid flooding the display.
    # NOTE(review): ``msg``/``indent``/``max_parents`` initializers
    # elided here.
    for pkg, parent_atoms in conflict_pkgs.iteritems():

        # Prefer packages that are not directly involved in a conflict.
        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            parent, atom = parent_atom
            if parent not in conflict_pkgs:
                pruned_list.add(parent_atom)

        for parent_atom in parent_atoms:
            if len(pruned_list) >= max_parents:
            pruned_list.add(parent_atom)

        omitted_parents = len(parent_atoms) - len(pruned_list)
        msg.append(indent + "%s pulled in by\n" % pkg)

        for parent_atom in pruned_list:
            parent, atom = parent_atom
            msg.append(2*indent)
            if isinstance(parent,
                (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.append(str(parent))
                # Display the specific atom from SetArg or
                msg.append("%s required by %s" % (atom, parent))

            msg.append(2*indent)
            msg.append("(and %d more)\n" % omitted_parents)

    sys.stderr.write("".join(msg))

    if "--quiet" not in self.myopts:
        show_blocker_docs_link()
5012 def display(self, mylist, favorites=[], verbosity=None):
5014 # This is used to prevent display_problems() from
5015 # redundantly displaying this exact same merge list
5016 # again via _show_merge_list().
5017 self._displayed_list = mylist
5019 if verbosity is None:
5020 verbosity = ("--quiet" in self.myopts and 1 or \
5021 "--verbose" in self.myopts and 3 or 2)
5022 favorites_set = InternalPackageSet(favorites)
5023 oneshot = "--oneshot" in self.myopts or \
5024 "--onlydeps" in self.myopts
5025 columns = "--columns" in self.myopts
5030 counters = PackageCounters()
5032 if verbosity == 1 and "--verbose" not in self.myopts:
5033 def create_use_string(*args):
5036 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
5038 is_new, reinst_flags,
5039 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
5040 alphabetical=("--alphabetical" in self.myopts)):
5048 cur_iuse = set(cur_iuse)
5049 enabled_flags = cur_iuse.intersection(cur_use)
5050 removed_iuse = set(old_iuse).difference(cur_iuse)
5051 any_iuse = cur_iuse.union(old_iuse)
5052 any_iuse = list(any_iuse)
5054 for flag in any_iuse:
5057 reinst_flag = reinst_flags and flag in reinst_flags
5058 if flag in enabled_flags:
5060 if is_new or flag in old_use and \
5061 (all_flags or reinst_flag):
5062 flag_str = red(flag)
5063 elif flag not in old_iuse:
5064 flag_str = yellow(flag) + "%*"
5065 elif flag not in old_use:
5066 flag_str = green(flag) + "*"
5067 elif flag in removed_iuse:
5068 if all_flags or reinst_flag:
5069 flag_str = yellow("-" + flag) + "%"
5072 flag_str = "(" + flag_str + ")"
5073 removed.append(flag_str)
5076 if is_new or flag in old_iuse and \
5077 flag not in old_use and \
5078 (all_flags or reinst_flag):
5079 flag_str = blue("-" + flag)
5080 elif flag not in old_iuse:
5081 flag_str = yellow("-" + flag)
5082 if flag not in iuse_forced:
5084 elif flag in old_use:
5085 flag_str = green("-" + flag) + "*"
5087 if flag in iuse_forced:
5088 flag_str = "(" + flag_str + ")"
5090 enabled.append(flag_str)
5092 disabled.append(flag_str)
5095 ret = " ".join(enabled)
5097 ret = " ".join(enabled + disabled + removed)
5099 ret = '%s="%s" ' % (name, ret)
5102 repo_display = RepoDisplay(self.roots)
5106 mygraph = self.digraph.copy()
5108 # If there are any Uninstall instances, add the corresponding
5109 # blockers to the digraph (useful for --tree display).
5111 executed_uninstalls = set(node for node in mylist \
5112 if isinstance(node, Package) and node.operation == "unmerge")
5114 for uninstall in self._blocker_uninstalls.leaf_nodes():
5115 uninstall_parents = \
5116 self._blocker_uninstalls.parent_nodes(uninstall)
5117 if not uninstall_parents:
5120 # Remove the corresponding "nomerge" node and substitute
5121 # the Uninstall node.
5122 inst_pkg = self._pkg_cache[
5123 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
5125 mygraph.remove(inst_pkg)
5130 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
5132 inst_pkg_blockers = []
5134 # Break the Package -> Uninstall edges.
5135 mygraph.remove(uninstall)
5137 # Resolution of a package's blockers
5138 # depend on it's own uninstallation.
5139 for blocker in inst_pkg_blockers:
5140 mygraph.add(uninstall, blocker)
5142 # Expand Package -> Uninstall edges into
5143 # Package -> Blocker -> Uninstall edges.
5144 for blocker in uninstall_parents:
5145 mygraph.add(uninstall, blocker)
5146 for parent in self._blocker_parents.parent_nodes(blocker):
5147 if parent != inst_pkg:
5148 mygraph.add(blocker, parent)
5150 # If the uninstall task did not need to be executed because
5151 # of an upgrade, display Blocker -> Upgrade edges since the
5152 # corresponding Blocker -> Uninstall edges will not be shown.
5154 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
5155 if upgrade_node is not None and \
5156 uninstall not in executed_uninstalls:
5157 for blocker in uninstall_parents:
5158 mygraph.add(upgrade_node, blocker)
5160 unsatisfied_blockers = []
5165 if isinstance(x, Blocker) and not x.satisfied:
5166 unsatisfied_blockers.append(x)
5169 if "--tree" in self.myopts:
5170 depth = len(tree_nodes)
5171 while depth and graph_key not in \
5172 mygraph.child_nodes(tree_nodes[depth-1]):
5175 tree_nodes = tree_nodes[:depth]
5176 tree_nodes.append(graph_key)
5177 display_list.append((x, depth, True))
5178 shown_edges.add((graph_key, tree_nodes[depth-1]))
5180 traversed_nodes = set() # prevent endless circles
5181 traversed_nodes.add(graph_key)
5182 def add_parents(current_node, ordered):
5184 # Do not traverse to parents if this node is an
5185 # an argument or a direct member of a set that has
5186 # been specified as an argument (system or world).
5187 if current_node not in self._set_nodes:
5188 parent_nodes = mygraph.parent_nodes(current_node)
5190 child_nodes = set(mygraph.child_nodes(current_node))
5191 selected_parent = None
5192 # First, try to avoid a direct cycle.
5193 for node in parent_nodes:
5194 if not isinstance(node, (Blocker, Package)):
5196 if node not in traversed_nodes and \
5197 node not in child_nodes:
5198 edge = (current_node, node)
5199 if edge in shown_edges:
5201 selected_parent = node
5203 if not selected_parent:
5204 # A direct cycle is unavoidable.
5205 for node in parent_nodes:
5206 if not isinstance(node, (Blocker, Package)):
5208 if node not in traversed_nodes:
5209 edge = (current_node, node)
5210 if edge in shown_edges:
5212 selected_parent = node
5215 shown_edges.add((current_node, selected_parent))
5216 traversed_nodes.add(selected_parent)
5217 add_parents(selected_parent, False)
5218 display_list.append((current_node,
5219 len(tree_nodes), ordered))
5220 tree_nodes.append(current_node)
5222 add_parents(graph_key, True)
5224 display_list.append((x, depth, True))
5225 mylist = display_list
5226 for x in unsatisfied_blockers:
5227 mylist.append((x, 0, True))
5229 last_merge_depth = 0
5230 for i in xrange(len(mylist)-1,-1,-1):
5231 graph_key, depth, ordered = mylist[i]
5232 if not ordered and depth == 0 and i > 0 \
5233 and graph_key == mylist[i-1][0] and \
5234 mylist[i-1][1] == 0:
5235 # An ordered node got a consecutive duplicate when the tree was
5239 if ordered and graph_key[-1] != "nomerge":
5240 last_merge_depth = depth
5242 if depth >= last_merge_depth or \
5243 i < len(mylist) - 1 and \
5244 depth >= mylist[i+1][1]:
5247 from portage import flatten
5248 from portage.dep import use_reduce, paren_reduce
5249 # files to fetch list - avoids counting a same file twice
5250 # in size display (verbose mode)
5253 # Use this set to detect when all the "repoadd" strings are "[0]"
5254 # and disable the entire repo display in this case.
5257 for mylist_index in xrange(len(mylist)):
5258 x, depth, ordered = mylist[mylist_index]
5262 portdb = self.trees[myroot]["porttree"].dbapi
5263 bindb = self.trees[myroot]["bintree"].dbapi
5264 vardb = self.trees[myroot]["vartree"].dbapi
5265 vartree = self.trees[myroot]["vartree"]
5266 pkgsettings = self.pkgsettings[myroot]
5269 indent = " " * depth
5271 if isinstance(x, Blocker):
5273 blocker_style = "PKG_BLOCKER_SATISFIED"
5274 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
5276 blocker_style = "PKG_BLOCKER"
5277 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
5279 counters.blocks += 1
5281 counters.blocks_satisfied += 1
5282 resolved = portage.key_expand(
5283 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
5284 if "--columns" in self.myopts and "--quiet" in self.myopts:
5285 addl += " " + colorize(blocker_style, resolved)
5287 addl = "[%s %s] %s%s" % \
5288 (colorize(blocker_style, "blocks"),
5289 addl, indent, colorize(blocker_style, resolved))
5290 block_parents = self._blocker_parents.parent_nodes(x)
5291 block_parents = set([pnode[2] for pnode in block_parents])
5292 block_parents = ", ".join(block_parents)
5294 addl += colorize(blocker_style,
5295 " (\"%s\" is blocking %s)") % \
5296 (str(x.atom).lstrip("!"), block_parents)
5298 addl += colorize(blocker_style,
5299 " (is blocking %s)") % block_parents
5300 if isinstance(x, Blocker) and x.satisfied:
5305 blockers.append(addl)
5308 pkg_merge = ordered and pkg_status == "merge"
5309 if not pkg_merge and pkg_status == "merge":
5310 pkg_status = "nomerge"
5311 built = pkg_type != "ebuild"
5312 installed = pkg_type == "installed"
5314 metadata = pkg.metadata
5316 repo_name = metadata["repository"]
5317 if pkg_type == "ebuild":
5318 ebuild_path = portdb.findname(pkg_key)
5319 if not ebuild_path: # shouldn't happen
5320 raise portage.exception.PackageNotFound(pkg_key)
5321 repo_path_real = os.path.dirname(os.path.dirname(
5322 os.path.dirname(ebuild_path)))
5324 repo_path_real = portdb.getRepositoryPath(repo_name)
5325 pkg_use = list(pkg.use.enabled)
5327 restrict = flatten(use_reduce(paren_reduce(
5328 pkg.metadata["RESTRICT"]), uselist=pkg_use))
5329 except portage.exception.InvalidDependString, e:
5330 if not pkg.installed:
5331 show_invalid_depstring_notice(x,
5332 pkg.metadata["RESTRICT"], str(e))
5336 if "ebuild" == pkg_type and x[3] != "nomerge" and \
5337 "fetch" in restrict:
5340 counters.restrict_fetch += 1
5341 if portdb.fetch_check(pkg_key, pkg_use):
5344 counters.restrict_fetch_satisfied += 1
5346 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
5347 #param is used for -u, where you still *do* want to see when something is being upgraded.
5350 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
5351 if vardb.cpv_exists(pkg_key):
5352 addl=" "+yellow("R")+fetch+" "
5355 counters.reinst += 1
5356 elif pkg_status == "uninstall":
5357 counters.uninst += 1
5358 # filter out old-style virtual matches
5359 elif installed_versions and \
5360 portage.cpv_getkey(installed_versions[0]) == \
5361 portage.cpv_getkey(pkg_key):
5362 myinslotlist = vardb.match(pkg.slot_atom)
5363 # If this is the first install of a new-style virtual, we
5364 # need to filter out old-style virtual matches.
5365 if myinslotlist and \
5366 portage.cpv_getkey(myinslotlist[0]) != \
5367 portage.cpv_getkey(pkg_key):
5370 myoldbest = myinslotlist[:]
5372 if not portage.dep.cpvequal(pkg_key,
5373 portage.best([pkg_key] + myoldbest)):
5375 addl += turquoise("U")+blue("D")
5377 counters.downgrades += 1
5380 addl += turquoise("U") + " "
5382 counters.upgrades += 1
5384 # New slot, mark it new.
5385 addl = " " + green("NS") + fetch + " "
5386 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
5388 counters.newslot += 1
5390 if "--changelog" in self.myopts:
5391 inst_matches = vardb.match(pkg.slot_atom)
5393 changelogs.extend(self.calc_changelog(
5394 portdb.findname(pkg_key),
5395 inst_matches[0], pkg_key))
5397 addl = " " + green("N") + " " + fetch + " "
5406 forced_flags = set()
5407 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
5408 forced_flags.update(pkgsettings.useforce)
5409 forced_flags.update(pkgsettings.usemask)
5411 cur_use = [flag for flag in pkg.use.enabled \
5412 if flag in pkg.iuse.all]
5413 cur_iuse = sorted(pkg.iuse.all)
5415 if myoldbest and myinslotlist:
5416 previous_cpv = myoldbest[0]
5418 previous_cpv = pkg.cpv
5419 if vardb.cpv_exists(previous_cpv):
5420 old_iuse, old_use = vardb.aux_get(
5421 previous_cpv, ["IUSE", "USE"])
5422 old_iuse = list(set(
5423 filter_iuse_defaults(old_iuse.split())))
5425 old_use = old_use.split()
5432 old_use = [flag for flag in old_use if flag in old_iuse]
5434 use_expand = pkgsettings["USE_EXPAND"].lower().split()
5436 use_expand.reverse()
5437 use_expand_hidden = \
5438 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
5440 def map_to_use_expand(myvals, forcedFlags=False,
5444 for exp in use_expand:
5447 for val in myvals[:]:
5448 if val.startswith(exp.lower()+"_"):
5449 if val in forced_flags:
5450 forced[exp].add(val[len(exp)+1:])
5451 ret[exp].append(val[len(exp)+1:])
5454 forced["USE"] = [val for val in myvals \
5455 if val in forced_flags]
5457 for exp in use_expand_hidden:
5463 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
5464 # are the only thing that triggered reinstallation.
5465 reinst_flags_map = {}
5466 reinstall_for_flags = self._reinstall_nodes.get(pkg)
5467 reinst_expand_map = None
5468 if reinstall_for_flags:
5469 reinst_flags_map = map_to_use_expand(
5470 list(reinstall_for_flags), removeHidden=False)
5471 for k in list(reinst_flags_map):
5472 if not reinst_flags_map[k]:
5473 del reinst_flags_map[k]
5474 if not reinst_flags_map.get("USE"):
5475 reinst_expand_map = reinst_flags_map.copy()
5476 reinst_expand_map.pop("USE", None)
5477 if reinst_expand_map and \
5478 not set(reinst_expand_map).difference(
5480 use_expand_hidden = \
5481 set(use_expand_hidden).difference(
5484 cur_iuse_map, iuse_forced = \
5485 map_to_use_expand(cur_iuse, forcedFlags=True)
5486 cur_use_map = map_to_use_expand(cur_use)
5487 old_iuse_map = map_to_use_expand(old_iuse)
5488 old_use_map = map_to_use_expand(old_use)
5491 use_expand.insert(0, "USE")
5493 for key in use_expand:
5494 if key in use_expand_hidden:
5496 verboseadd += create_use_string(key.upper(),
5497 cur_iuse_map[key], iuse_forced[key],
5498 cur_use_map[key], old_iuse_map[key],
5499 old_use_map[key], is_new,
5500 reinst_flags_map.get(key))
5505 if pkg_type == "ebuild" and pkg_merge:
5507 myfilesdict = portdb.getfetchsizes(pkg_key,
5508 useflags=pkg_use, debug=self.edebug)
5509 except portage.exception.InvalidDependString, e:
5510 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
5511 show_invalid_depstring_notice(x, src_uri, str(e))
5514 if myfilesdict is None:
5515 myfilesdict="[empty/missing/bad digest]"
5517 for myfetchfile in myfilesdict:
5518 if myfetchfile not in myfetchlist:
5519 mysize+=myfilesdict[myfetchfile]
5520 myfetchlist.append(myfetchfile)
5522 counters.totalsize += mysize
5523 verboseadd += format_size(mysize)
5526 # assign index for a previous version in the same slot
5527 has_previous = False
5528 repo_name_prev = None
5529 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
5531 slot_matches = vardb.match(slot_atom)
5534 repo_name_prev = vardb.aux_get(slot_matches[0],
5537 # now use the data to generate output
5538 if pkg.installed or not has_previous:
5539 repoadd = repo_display.repoStr(repo_path_real)
5541 repo_path_prev = None
5543 repo_path_prev = portdb.getRepositoryPath(
5545 if repo_path_prev == repo_path_real:
5546 repoadd = repo_display.repoStr(repo_path_real)
5548 repoadd = "%s=>%s" % (
5549 repo_display.repoStr(repo_path_prev),
5550 repo_display.repoStr(repo_path_real))
5552 repoadd_set.add(repoadd)
5554 xs = [portage.cpv_getkey(pkg_key)] + \
5555 list(portage.catpkgsplit(pkg_key)[2:])
5562 if "COLUMNWIDTH" in self.settings:
5564 mywidth = int(self.settings["COLUMNWIDTH"])
5565 except ValueError, e:
5566 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
5568 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
5569 self.settings["COLUMNWIDTH"], noiselevel=-1)
5571 oldlp = mywidth - 30
5574 # Convert myoldbest from a list to a string.
5578 for pos, key in enumerate(myoldbest):
5579 key = portage.catpkgsplit(key)[2] + \
5580 "-" + portage.catpkgsplit(key)[3]
5581 if key[-3:] == "-r0":
5583 myoldbest[pos] = key
5584 myoldbest = blue("["+", ".join(myoldbest)+"]")
5587 root_config = self.roots[myroot]
5588 system_set = root_config.sets["system"]
5589 world_set = root_config.sets["world"]
5594 pkg_system = system_set.findAtomForPackage(pkg)
5595 pkg_world = world_set.findAtomForPackage(pkg)
5596 if not (oneshot or pkg_world) and \
5597 myroot == self.target_root and \
5598 favorites_set.findAtomForPackage(pkg):
5599 # Maybe it will be added to world now.
5600 if create_world_atom(pkg, favorites_set, root_config):
5602 except portage.exception.InvalidDependString:
5603 # This is reported elsewhere if relevant.
5606 def pkgprint(pkg_str):
5609 return colorize("PKG_MERGE_SYSTEM", pkg_str)
5611 return colorize("PKG_MERGE_WORLD", pkg_str)
5613 return colorize("PKG_MERGE", pkg_str)
5614 elif pkg_status == "uninstall":
5615 return colorize("PKG_UNINSTALL", pkg_str)
5618 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
5620 return colorize("PKG_NOMERGE_WORLD", pkg_str)
5622 return colorize("PKG_NOMERGE", pkg_str)
5625 properties = flatten(use_reduce(paren_reduce(
5626 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
5627 except portage.exception.InvalidDependString, e:
5628 if not pkg.installed:
5629 show_invalid_depstring_notice(pkg,
5630 pkg.metadata["PROPERTIES"], str(e))
5634 interactive = "interactive" in properties
5635 if interactive and pkg.operation == "merge":
5636 addl = colorize("WARN", "I") + addl[1:]
5638 counters.interactive += 1
5643 if "--columns" in self.myopts:
5644 if "--quiet" in self.myopts:
5645 myprint=addl+" "+indent+pkgprint(pkg_cp)
5646 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
5647 myprint=myprint+myoldbest
5648 myprint=myprint+darkgreen("to "+x[1])
5652 myprint = "[%s] %s%s" % \
5653 (pkgprint(pkg_status.ljust(13)),
5654 indent, pkgprint(pkg.cp))
5656 myprint = "[%s %s] %s%s" % \
5657 (pkgprint(pkg.type_name), addl,
5658 indent, pkgprint(pkg.cp))
5659 if (newlp-nc_len(myprint)) > 0:
5660 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5661 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
5662 if (oldlp-nc_len(myprint)) > 0:
5663 myprint=myprint+" "*(oldlp-nc_len(myprint))
5664 myprint=myprint+myoldbest
5665 myprint += darkgreen("to " + pkg.root)
5668 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
5670 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
5671 myprint += indent + pkgprint(pkg_key) + " " + \
5672 myoldbest + darkgreen("to " + myroot)
5674 if "--columns" in self.myopts:
5675 if "--quiet" in self.myopts:
5676 myprint=addl+" "+indent+pkgprint(pkg_cp)
5677 myprint=myprint+" "+green(xs[1]+xs[2])+" "
5678 myprint=myprint+myoldbest
5682 myprint = "[%s] %s%s" % \
5683 (pkgprint(pkg_status.ljust(13)),
5684 indent, pkgprint(pkg.cp))
5686 myprint = "[%s %s] %s%s" % \
5687 (pkgprint(pkg.type_name), addl,
5688 indent, pkgprint(pkg.cp))
5689 if (newlp-nc_len(myprint)) > 0:
5690 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5691 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
5692 if (oldlp-nc_len(myprint)) > 0:
5693 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
5694 myprint += myoldbest
5697 myprint = "[%s] %s%s %s" % \
5698 (pkgprint(pkg_status.ljust(13)),
5699 indent, pkgprint(pkg.cpv),
5702 myprint = "[%s %s] %s%s %s" % \
5703 (pkgprint(pkg_type), addl, indent,
5704 pkgprint(pkg.cpv), myoldbest)
5706 if columns and pkg.operation == "uninstall":
5708 p.append((myprint, verboseadd, repoadd))
5710 if "--tree" not in self.myopts and \
5711 "--quiet" not in self.myopts and \
5712 not self._opts_no_restart.intersection(self.myopts) and \
5713 pkg.root == self._running_root.root and \
5714 portage.match_from_list(
5715 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
5716 not vardb.cpv_exists(pkg.cpv) and \
5717 "--quiet" not in self.myopts:
5718 if mylist_index < len(mylist) - 1:
5719 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
5720 p.append(colorize("WARN", " then resume the merge."))
5723 show_repos = repoadd_set and repoadd_set != set(["0"])
5726 if isinstance(x, basestring):
5727 out.write("%s\n" % (x,))
5730 myprint, verboseadd, repoadd = x
5733 myprint += " " + verboseadd
5735 if show_repos and repoadd:
5736 myprint += " " + teal("[%s]" % repoadd)
5738 out.write("%s\n" % (myprint,))
5747 sys.stdout.write(str(repo_display))
5749 if "--changelog" in self.myopts:
5751 for revision,text in changelogs:
5752 print bold('*'+revision)
5753 sys.stdout.write(text)
# Report dependency-graph problems (circular deps, blockers, world-file
# issues, masked installed packages) to the user after the merge list.
5758 def display_problems(self):
5760 Display problems with the dependency graph such as slot collisions.
5761 This is called internally by display() to show the problems _after_
5762 the merge list where it is most likely to be seen, but if display()
5763 is not going to be called then this method should be called explicitly
5764 to ensure that the user is notified of problems with the graph.
5766 All output goes to stderr, except for unsatisfied dependencies which
5767 go to stdout for parsing by programs such as autounmask.
5770 # Note that show_masked_packages() sends it's output to
5771 # stdout, and some programs such as autounmask parse the
5772 # output in cases when emerge bails out. However, when
5773 # show_masked_packages() is called for installed packages
5774 # here, the message is a warning that is more appropriate
5775 # to send to stderr, so temporarily redirect stdout to
5776 # stderr. TODO: Fix output code so there's a cleaner way
5777 # to redirect everything to stderr.
# NOTE(review): sys.stdout is swapped to stderr here; the matching
# restore (presumably in a try/finally) is not visible in this excerpt
# -- confirm against the full file.
5782 sys.stdout = sys.stderr
5783 self._display_problems()
5789 # This goes to stdout for parsing by programs like autounmask.
5790 for pargs, kwargs in self._unsatisfied_deps_for_display:
5791 self._show_unsatisfied_dep(*pargs, **kwargs)
# Emit the individual problem reports. All messages in this method are
# written to stderr (the stdout-destined output is handled by the
# public display_problems() wrapper).
5793 def _display_problems(self):
5794 if self._circular_deps_for_display is not None:
5795 self._show_circular_deps(
5796 self._circular_deps_for_display)
5798 # The user is only notified of a slot conflict if
5799 # there are no unresolvable blocker conflicts.
5800 if self._unsatisfied_blockers_for_display is not None:
5801 self._show_unsatisfied_blockers(
5802 self._unsatisfied_blockers_for_display)
# NOTE(review): the condition guarding the slot-collision notice is
# partly elided in this excerpt -- confirm the branch structure in the
# full file before relying on this ordering.
5804 self._show_slot_collision_notice()
5806 # TODO: Add generic support for "set problem" handlers so that
5807 # the below warnings aren't special cases for world only.
5809 if self._missing_args:
5810 world_problems = False
5811 if "world" in self._sets:
5812 # Filter out indirect members of world (from nested sets)
5813 # since only direct members of world are desired here.
5814 world_set = self.roots[self.target_root].sets["world"]
5815 for arg, atom in self._missing_args:
5816 if arg.name == "world" and atom in world_set:
5817 world_problems = True
# Direct world entries could not be satisfied: point the user at emaint.
5821 sys.stderr.write("\n!!! Problems have been " + \
5822 "detected with your world file\n")
5823 sys.stderr.write("!!! Please run " + \
5824 green("emaint --check world")+"\n\n")
5826 if self._missing_args:
5827 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5828 " Ebuilds for the following packages are either all\n")
5829 sys.stderr.write(colorize("BAD", "!!!") + \
5830 " masked or don't exist:\n")
5831 sys.stderr.write(" ".join(str(atom) for arg, atom in \
5832 self._missing_args) + "\n")
5834 if self._pprovided_args:
# Group package.provided arguments by (arg, atom) and remember which
# parent sets pulled each one in, for the warning message below.
5836 for arg, atom in self._pprovided_args:
5837 if isinstance(arg, SetArg):
5839 arg_atom = (atom, atom)
5842 arg_atom = (arg.arg, atom)
5843 refs = arg_refs.setdefault(arg_atom, [])
5844 if parent not in refs:
5847 msg.append(bad("\nWARNING: "))
5848 if len(self._pprovided_args) > 1:
5849 msg.append("Requested packages will not be " + \
5850 "merged because they are listed in\n")
5852 msg.append("A requested package will not be " + \
5853 "merged because it is listed in\n")
5854 msg.append("package.provided:\n\n")
5855 problems_sets = set()
5856 for (arg, atom), refs in arg_refs.iteritems():
5859 problems_sets.update(refs)
5861 ref_string = ", ".join(["'%s'" % name for name in refs])
5862 ref_string = " pulled in by " + ref_string
5863 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
# Offer remediation advice only when the world set is implicated.
5865 if "world" in problems_sets:
5866 msg.append("This problem can be solved in one of the following ways:\n\n")
5867 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
5868 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
5869 msg.append(" C) Remove offending entries from package.provided.\n\n")
5870 msg.append("The best course of action depends on the reason that an offending\n")
5871 msg.append("package.provided entry exists.\n\n")
5872 sys.stderr.write("".join(msg))
# Collect masking details for installed-but-now-masked packages.
5874 masked_packages = []
5875 for pkg in self._masked_installed:
5876 root_config = pkg.root_config
5877 pkgsettings = self.pkgsettings[pkg.root]
5878 mreasons = get_masking_status(pkg, pkgsettings, root_config)
5879 masked_packages.append((root_config, pkgsettings,
5880 pkg.cpv, pkg.metadata, mreasons))
5882 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5883 " The following installed packages are masked:\n")
5884 show_masked_packages(masked_packages)
# Return the ChangeLog entries lying between the installed version
# (`current`) and the version about to be merged (`next`), as produced
# by find_changelog_tags(). Returns nothing useful if the ebuild or its
# ChangeLog cannot be read.
5888 def calc_changelog(self,ebuildpath,current,next):
5889 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize both versions to "pv-r" form, dropping a trailing "-r0"
# so they match the release tags found in the ChangeLog.
5891 current = '-'.join(portage.catpkgsplit(current)[1:])
5892 if current.endswith('-r0'):
5893 current = current[:-3]
5894 next = '-'.join(portage.catpkgsplit(next)[1:])
5895 if next.endswith('-r0'):
5897 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
5899 changelog = open(changelogpath).read()
5900 except SystemExit, e:
5901 raise # Needed else can't exit
5904 divisions = self.find_changelog_tags(changelog)
5905 #print 'XX from',current,'to',next
5906 #for div,text in divisions: print 'XX',div
5907 # skip entries for all revisions above the one we are about to emerge
5908 for i in range(len(divisions)):
5909 if divisions[i][0]==next:
5910 divisions = divisions[i:]
5912 # find out how many entries we are going to display
5913 for i in range(len(divisions)):
5914 if divisions[i][0]==current:
5915 divisions = divisions[:i]
5918 # couldnt find the current revision in the list. display nothing
# Split a ChangeLog's text into (release, text) pairs, one per
# "*<version>" heading, normalizing away ".ebuild" and "-r0" suffixes
# on the release tag.
5922 def find_changelog_tags(self,changelog):
5926 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further heading found: flush the remaining text under the last
# release seen (if any).
5928 if release is not None:
5929 divs.append((release,changelog))
# A new heading: attribute the text preceding it to the previous
# release, then advance past the heading line.
5931 if release is not None:
5932 divs.append((release,changelog[:match.start()]))
5933 changelog = changelog[match.end():]
5934 release = match.group(1)
5935 if release.endswith('.ebuild'):
5936 release = release[:-7]
5937 if release.endswith('-r0'):
5938 release = release[:-3]
5940 def saveNomergeFavorites(self):
5941 """Find atoms in favorites that are not in the mergelist and add them
5942 to the world file if necessary."""
# Bail out in modes that must not modify the world file.
5943 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
5944 "--oneshot", "--onlydeps", "--pretend"):
5945 if x in self.myopts:
5947 root_config = self.roots[self.target_root]
5948 world_set = root_config.sets["world"]
5950 world_locked = False
5951 if hasattr(world_set, "lock"):
5955 if hasattr(world_set, "load"):
5956 world_set.load() # maybe it's changed on disk
5958 args_set = self._sets["args"]
5959 portdb = self.trees[self.target_root]["porttree"].dbapi
5960 added_favorites = set()
# Walk the nodes pulled in by arguments; only "nomerge" nodes are
# candidates for recording (merged ones are handled elsewhere).
5961 for x in self._set_nodes:
5962 pkg_type, root, pkg_key, pkg_status = x
5963 if pkg_status != "nomerge":
5967 myfavkey = create_world_atom(x, args_set, root_config)
5969 if myfavkey in added_favorites:
5971 added_favorites.add(myfavkey)
5972 except portage.exception.InvalidDependString, e:
5973 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
5974 (pkg_key, str(e)), noiselevel=-1)
5975 writemsg("!!! see '%s'\n\n" % os.path.join(
5976 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested named sets (SETPREFIX-prefixed) that are
# world-file candidates, skipping the implicit "args"/"world" sets.
5979 for k in self._sets:
5980 if k in ("args", "world") or not root_config.sets[k].world_candidate:
5985 all_added.append(SETPREFIX + k)
5986 all_added.extend(added_favorites)
5989 print ">>> Recording %s in \"world\" favorites file..." % \
5990 colorize("INFORM", str(a))
5992 world_set.update(all_added)
# NOTE(review): the world_locked lock/unlock pairing is partly elided
# in this excerpt -- confirm the unlock occurs in a finally block.
# Rebuild the dependency graph from a saved "resume" mergelist,
# validating each entry and re-running graph creation so that
# --resume/--keep-going can continue where a previous run stopped.
5997 def loadResumeCommand(self, resume_data, skip_masked=True,
6000 Add a resume command to the graph and validate it in the process. This
6001 will raise a PackageNotFound exception if a package is not available.
# Defensively validate the structure of the persisted resume data.
6004 if not isinstance(resume_data, dict):
6007 mergelist = resume_data.get("mergelist")
6008 if not isinstance(mergelist, list):
6011 fakedb = self.mydbapi
6013 serialized_tasks = []
# Each mergelist entry is a 4-tuple serialized as a list:
# [pkg_type, root, cpv, action]. Skip/reject malformed entries.
6016 if not (isinstance(x, list) and len(x) == 4):
6018 pkg_type, myroot, pkg_key, action = x
6019 if pkg_type not in self.pkg_tree_map:
6021 if action != "merge":
6023 tree_type = self.pkg_tree_map[pkg_type]
6024 mydb = trees[myroot][tree_type].dbapi
6025 db_keys = list(self._trees_orig[myroot][
6026 tree_type].dbapi._aux_cache_keys)
6028 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
6030 # It does no exist or it is corrupt.
6031 if action == "uninstall":
6034 # TODO: log these somewhere
6036 raise portage.exception.PackageNotFound(pkg_key)
6037 installed = action == "uninstall"
6038 built = pkg_type != "ebuild"
6039 root_config = self.roots[myroot]
6040 pkg = Package(built=built, cpv=pkg_key,
6041 installed=installed, metadata=metadata,
6042 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting the stale values from the resume data.
6044 if pkg_type == "ebuild":
6045 pkgsettings = self.pkgsettings[myroot]
6046 pkgsettings.setcpv(pkg)
6047 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6048 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
6049 self._pkg_cache[pkg] = pkg
6051 root_config = self.roots[pkg.root]
# A package scheduled for merge that is no longer visible is either
# deferred as a masked task or queued for an unsatisfied-dep report.
6052 if "merge" == pkg.operation and \
6053 not visible(root_config.settings, pkg):
6055 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6057 self._unsatisfied_deps_for_display.append(
6058 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6060 fakedb[myroot].cpv_inject(pkg)
6061 serialized_tasks.append(pkg)
6062 self.spinner.update()
6064 if self._unsatisfied_deps_for_display:
# With --nodeps (or nothing to do) the saved task list is used as-is,
# without re-running dependency calculation.
6067 if not serialized_tasks or "--nodeps" in self.myopts:
6068 self._serialized_tasks_cache = serialized_tasks
6069 self._scheduler_graph = self.digraph
6071 self._select_package = self._select_pkg_from_graph
6072 self.myparams.add("selective")
6073 # Always traverse deep dependencies in order to account for
6074 # potentially unsatisfied dependencies of installed packages.
6075 # This is necessary for correct --keep-going or --resume operation
6076 # in case a package from a group of circularly dependent packages
6077 # fails. In this case, a package which has recently been installed
6078 # may have an unsatisfied circular dependency (pulled in by
6079 # PDEPEND, for example). So, even though a package is already
6080 # installed, it may not have all of it's dependencies satisfied, so
6081 # it may not be usable. If such a package is in the subgraph of
6082 # deep depenedencies of a scheduled build, that build needs to
6083 # be cancelled. In order for this type of situation to be
6084 # recognized, deep traversal of dependencies is required.
6085 self.myparams.add("deep")
6087 favorites = resume_data.get("favorites")
6088 args_set = self._sets["args"]
6089 if isinstance(favorites, list):
6090 args = self._load_favorites(favorites)
6094 for task in serialized_tasks:
6095 if isinstance(task, Package) and \
6096 task.operation == "merge":
6097 if not self._add_pkg(task, None):
6100 # Packages for argument atoms need to be explicitly
6101 # added via _add_pkg() so that they are included in the
6102 # digraph (needed at least for --tree display).
6104 for atom in arg.set:
6105 pkg, existing_node = self._select_package(
6106 arg.root_config.root, atom)
6107 if existing_node is None and \
6109 if not self._add_pkg(pkg, Dependency(atom=atom,
6110 root=pkg.root, parent=arg)):
6113 # Allow unsatisfied deps here to avoid showing a masking
6114 # message for an unsatisfied dep that isn't necessarily
6116 if not self._create_graph(allow_unsatisfied=True):
# Unsatisfied deps of to-be-merged packages always invalidate the
# resume list; those of installed packages only count when they sit
# in the ancestry of something scheduled for merge (checked below).
6119 unsatisfied_deps = []
6120 for dep in self._unsatisfied_deps:
6121 if not isinstance(dep.parent, Package):
6123 if dep.parent.operation == "merge":
6124 unsatisfied_deps.append(dep)
6127 # For unsatisfied deps of installed packages, only account for
6128 # them if they are in the subgraph of dependencies of a package
6129 # which is scheduled to be installed.
6130 unsatisfied_install = False
# Breadth-first walk up the digraph looking for a "merge" ancestor.
6132 dep_stack = self.digraph.parent_nodes(dep.parent)
6134 node = dep_stack.pop()
6135 if not isinstance(node, Package):
6137 if node.operation == "merge":
6138 unsatisfied_install = True
6140 if node in traversed:
6143 dep_stack.extend(self.digraph.parent_nodes(node))
6145 if unsatisfied_install:
6146 unsatisfied_deps.append(dep)
6148 if masked_tasks or unsatisfied_deps:
6149 # This probably means that a required package
6150 # was dropped via --skipfirst. It makes the
6151 # resume list invalid, so convert it to a
6152 # UnsatisfiedResumeDep exception.
6153 raise self.UnsatisfiedResumeDep(self,
6154 masked_tasks + unsatisfied_deps)
6155 self._serialized_tasks_cache = None
6158 except self._unknown_internal_error:
6163 def _load_favorites(self, favorites):
6165 Use a list of favorites to resume state from a
6166 previous select_files() call. This creates similar
6167 DependencyArg instances to those that would have
6168 been created by the original select_files() call.
6169 This allows Package instances to be matched with
6170 DependencyArg instances during graph creation.
6172 root_config = self.roots[self.target_root]
6173 getSetAtoms = root_config.setconfig.getSetAtoms
6174 sets = root_config.sets
# Each favorite is either a SETPREFIX-prefixed set name or a plain
# atom string; anything else (or an invalid atom) is skipped.
6177 if not isinstance(x, basestring):
6179 if x in ("system", "world"):
6181 if x.startswith(SETPREFIX):
6182 s = x[len(SETPREFIX):]
6187 # Recursively expand sets so that containment tests in
6188 # self._get_parent_sets() properly match atoms in nested
6189 # sets (like if world contains system).
6190 expanded_set = InternalPackageSet(
6191 initial_atoms=getSetAtoms(s))
6192 self._sets[s] = expanded_set
6193 args.append(SetArg(arg=x, set=expanded_set,
6194 root_config=root_config))
6196 if not portage.isvalidatom(x):
6198 args.append(AtomArg(arg=x, atom=x,
6199 root_config=root_config))
# Register the reconstructed args with the graph state.
6201 self._set_args(args)
# Raised by loadResumeCommand() when the saved resume list has become
# invalid (see its use there); carries the depgraph for error reporting.
6204 class UnsatisfiedResumeDep(portage.exception.PortageException):
6206 A dependency of a resume list is not installed. This
6207 can occur when a required package is dropped from the
6208 merge list via --skipfirst.
6210 def __init__(self, depgraph, value):
6211 portage.exception.PortageException.__init__(self, value)
6212 self.depgraph = depgraph
# Base class for the depgraph's internal control-flow exceptions below;
# the message value defaults to an empty string.
6214 class _internal_exception(portage.exception.PortageException):
6215 def __init__(self, value=""):
6216 portage.exception.PortageException.__init__(self, value)
# Internal sentinel exception: aborts graph creation after the real
# error has already been written to stderr.
6218 class _unknown_internal_error(_internal_exception):
6220 Used by the depgraph internally to terminate graph creation.
6221 The specific reason for the failure should have been dumped
6222 to stderr, unfortunately, the exact reason for the failure
# Internal control-flow exception: signals that _serialize_tasks()
# must be re-invoked (e.g. after adding neglected dependencies).
6226 class _serialize_tasks_retry(_internal_exception):
6228 This is raised by the _serialize_tasks() method when it needs to
6229 be called again for some reason. The only case that it's currently
6230 used for is when neglected dependencies need to be added to the
6231 graph in order to avoid making a potentially unsafe decision.
6234 class _dep_check_composite_db(portage.dbapi):
6236 A dbapi-like interface that is optimized for use in dep_check() calls.
6237 This is built on top of the existing depgraph package selection logic.
6238 Some packages that have been added to the graph may be masked from this
6239 view in order to influence the atom preference selection that occurs
6242 def __init__(self, depgraph, root):
6243 portage.dbapi.__init__(self)
6244 self._depgraph = depgraph
# Per-atom match results and cpv -> Package mapping, filled lazily.
6246 self._match_cache = {}
6247 self._cpv_pkg_map = {}
# Drop all memoized match results (used when graph state changes).
6249 def _clear_cache(self):
6250 self._match_cache.clear()
6251 self._cpv_pkg_map.clear()
# Return the cpvs matching `atom`, consulting the depgraph's package
# selection and caching the (ascending-sorted) result per atom.
6253 def match(self, atom):
6254 ret = self._match_cache.get(atom)
6259 atom = self._dep_expand(atom)
6260 pkg, existing = self._depgraph._select_package(self._root, atom)
6264 # Return the highest available from select_package() as well as
6265 # any matching slots in the graph db.
6267 slots.add(pkg.metadata["SLOT"])
6268 atom_cp = portage.dep_getkey(atom)
6269 if pkg.cp.startswith("virtual/"):
6270 # For new-style virtual lookahead that occurs inside
6271 # dep_check(), examine all slots. This is needed
6272 # so that newer slots will not unnecessarily be pulled in
6273 # when a satisfying lower slot is already installed. For
6274 # example, if virtual/jdk-1.4 is satisfied via kaffe then
6275 # there's no need to pull in a newer slot to satisfy a
6276 # virtual/jdk dependency.
6277 for db, pkg_type, built, installed, db_keys in \
6278 self._depgraph._filtered_trees[self._root]["dbs"]:
6279 for cpv in db.match(atom):
6280 if portage.cpv_getkey(cpv) != pkg.cp:
6282 slots.add(db.aux_get(cpv, ["SLOT"])[0])
6284 if self._visible(pkg):
6285 self._cpv_pkg_map[pkg.cpv] = pkg
6287 slots.remove(pkg.metadata["SLOT"])
# Resolve one package per remaining slot via slot-specific atoms.
6289 slot_atom = "%s:%s" % (atom_cp, slots.pop())
6290 pkg, existing = self._depgraph._select_package(
6291 self._root, slot_atom)
6294 if not self._visible(pkg):
6296 self._cpv_pkg_map[pkg.cpv] = pkg
6299 self._cpv_sort_ascending(ret)
6300 self._match_cache[orig_atom] = ret
# Decide whether `pkg` may be offered through this view; hides
# packages that would cause slot conflicts or that are not the
# highest visible version in their slot.
6303 def _visible(self, pkg):
6304 if pkg.installed and "selective" not in self._depgraph.myparams:
6306 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
6307 except (StopIteration, portage.exception.InvalidDependString):
6314 self._depgraph.pkgsettings[pkg.root], pkg):
6316 except portage.exception.InvalidDependString:
6318 in_graph = self._depgraph._slot_pkg_map[
6319 self._root].get(pkg.slot_atom)
6320 if in_graph is None:
6321 # Mask choices for packages which are not the highest visible
6322 # version within their slot (since they usually trigger slot
6324 highest_visible, in_graph = self._depgraph._select_package(
6325 self._root, pkg.slot_atom)
6326 if pkg != highest_visible:
6328 elif in_graph != pkg:
6329 # Mask choices for packages that would trigger a slot
6330 # conflict with a previously selected package.
6334 def _dep_expand(self, atom):
6336 This is only needed for old installed packages that may
6337 contain atoms that are not fully qualified with a specific
6338 category. Emulate the cpv_expand() function that's used by
6339 dbapi.match() in cases like this. If there are multiple
6340 matches, it's often due to a new-style virtual that has
6341 been added, so try to filter those out to avoid raising
6344 root_config = self._depgraph.roots[self._root]
6346 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
6347 if len(expanded_atoms) > 1:
# Prefer the single non-virtual expansion when one exists.
6348 non_virtual_atoms = []
6349 for x in expanded_atoms:
6350 if not portage.dep_getkey(x).startswith("virtual/"):
6351 non_virtual_atoms.append(x)
6352 if len(non_virtual_atoms) == 1:
6353 expanded_atoms = non_virtual_atoms
6354 if len(expanded_atoms) > 1:
6355 # compatible with portage.cpv_expand()
6356 raise portage.exception.AmbiguousPackageName(
6357 [portage.dep_getkey(x) for x in expanded_atoms])
6359 atom = expanded_atoms[0]
# No expansion matched: fall back to PROVIDE-based virtuals, else
# a "null" category so downstream matching fails cleanly.
6361 null_atom = insert_category_into_atom(atom, "null")
6362 null_cp = portage.dep_getkey(null_atom)
6363 cat, atom_pn = portage.catsplit(null_cp)
6364 virts_p = root_config.settings.get_virts_p().get(atom_pn)
6366 # Allow the resolver to choose which virtual.
6367 atom = insert_category_into_atom(atom, "virtual")
6369 atom = insert_category_into_atom(atom, "null")
# Serve metadata for cpvs previously returned by match(); unknown
# keys yield an empty string.
6372 def aux_get(self, cpv, wants):
6373 metadata = self._cpv_pkg_map[cpv].metadata
6374 return [metadata.get(x, "") for x in wants]
# Tallies of merge-list categories (upgrades, downgrades, new, reinstall,
# uninstall, blockers, fetch restrictions, download size) accumulated
# while the merge list is rendered; __str__ formats the summary line.
6376 class PackageCounters(object):
6386 self.blocks_satisfied = 0
6388 self.restrict_fetch = 0
6389 self.restrict_fetch_satisfied = 0
6390 self.interactive = 0
6393 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
6396 myoutput.append("Total: %s package" % total_installs)
# Pluralize "package" and open the parenthesized breakdown only when
# there is at least one install.
6397 if total_installs != 1:
6398 myoutput.append("s")
6399 if total_installs != 0:
6400 myoutput.append(" (")
6401 if self.upgrades > 0:
6402 details.append("%s upgrade" % self.upgrades)
6403 if self.upgrades > 1:
6405 if self.downgrades > 0:
6406 details.append("%s downgrade" % self.downgrades)
6407 if self.downgrades > 1:
6410 details.append("%s new" % self.new)
6411 if self.newslot > 0:
6412 details.append("%s in new slot" % self.newslot)
6413 if self.newslot > 1:
6416 details.append("%s reinstall" % self.reinst)
6420 details.append("%s uninstall" % self.uninst)
6423 if self.interactive > 0:
6424 details.append("%s %s" % (self.interactive,
6425 colorize("WARN", "interactive")))
6426 myoutput.append(", ".join(details))
6427 if total_installs != 0:
6428 myoutput.append(")")
6429 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
# Fetch-restricted packages get their own line, flagging how many are
# still unsatisfied (files not yet fetched).
6430 if self.restrict_fetch:
6431 myoutput.append("\nFetch Restriction: %s package" % \
6432 self.restrict_fetch)
6433 if self.restrict_fetch > 1:
6434 myoutput.append("s")
6435 if self.restrict_fetch_satisfied < self.restrict_fetch:
6436 myoutput.append(bad(" (%s unsatisfied)") % \
6437 (self.restrict_fetch - self.restrict_fetch_satisfied))
6439 myoutput.append("\nConflict: %s block" % \
6442 myoutput.append("s")
6443 if self.blocks_satisfied < self.blocks:
6444 myoutput.append(bad(" (%s unsatisfied)") % \
6445 (self.blocks - self.blocks_satisfied))
6446 return "".join(myoutput)
# Cached result of can_poll_device(); None means "not probed yet".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.

	@rtype: bool
	@returns: True if poll() on a device succeeds, False otherwise.

	NOTE(review): this copy of the file was truncated (the try/except
	around the /dev/null open and the poll-probe setup were missing);
	restored to match the visible statements.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		# Return the cached result of a previous probe.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	try:
		dev_null = open('/dev/null', 'rb')
	except IOError:
		_can_poll_device = False
		return _can_poll_device

	p = select.poll()
	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			# The kernel rejected polling this fd; fall back later.
			invalid_request = True
			break
	dev_null.close()

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.
	"""
	if can_poll_device():
		return select.poll()
	return PollSelectAdapter()
6496 class PollScheduler(object):
6498 class _sched_iface_class(SlotObject):
6499 __slots__ = ("register", "schedule", "unregister")
6503 self._max_load = None
6505 self._poll_event_queue = []
6506 self._poll_event_handlers = {}
6507 self._poll_event_handler_ids = {}
6508 # Increment id for each new handler.
6509 self._event_handler_id = 0
6510 self._poll_obj = create_poll_instance()
6511 self._scheduling = False
6513 def _schedule(self):
6515 Calls _schedule_tasks() and automatically returns early from
6516 any recursive calls to this method that the _schedule_tasks()
6517 call might trigger. This makes _schedule() safe to call from
6518 inside exit listeners.
6520 if self._scheduling:
6522 self._scheduling = True
6524 return self._schedule_tasks()
6526 self._scheduling = False
6528 def _running_job_count(self):
6531 def _can_add_job(self):
6532 max_jobs = self._max_jobs
6533 max_load = self._max_load
6535 if self._max_jobs is not True and \
6536 self._running_job_count() >= self._max_jobs:
6539 if max_load is not None and \
6540 (max_jobs is True or max_jobs > 1) and \
6541 self._running_job_count() >= 1:
6543 avg1, avg5, avg15 = getloadavg()
6547 if avg1 >= max_load:
6552 def _poll(self, timeout=None):
6554 All poll() calls pass through here. The poll events
6555 are added directly to self._poll_event_queue.
6556 In order to avoid endless blocking, this raises
6557 StopIteration if timeout is None and there are
6558 no file descriptors to poll.
6560 if not self._poll_event_handlers:
6562 if timeout is None and \
6563 not self._poll_event_handlers:
6564 raise StopIteration(
6565 "timeout is None and there are no poll() event handlers")
6567 # The following error is known to occur with Linux kernel versions
6570 # select.error: (4, 'Interrupted system call')
6572 # This error has been observed after a SIGSTOP, followed by SIGCONT.
6573 # Treat it similar to EAGAIN if timeout is None, otherwise just return
6574 # without any events.
6577 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
6579 except select.error, e:
6580 writemsg_level("\n!!! select error: %s\n" % (e,),
6581 level=logging.ERROR, noiselevel=-1)
6583 if timeout is not None:
6586 def _next_poll_event(self, timeout=None):
6588 Since the _schedule_wait() loop is called by event
6589 handlers from _poll_loop(), maintain a central event
6590 queue for both of them to share events from a single
6591 poll() call. In order to avoid endless blocking, this
6592 raises StopIteration if timeout is None and there are
6593 no file descriptors to poll.
6595 if not self._poll_event_queue:
6597 return self._poll_event_queue.pop()
6599 def _poll_loop(self):
6601 event_handlers = self._poll_event_handlers
6602 event_handled = False
6605 while event_handlers:
6606 f, event = self._next_poll_event()
6607 handler, reg_id = event_handlers[f]
6609 event_handled = True
6610 except StopIteration:
6611 event_handled = True
6613 if not event_handled:
6614 raise AssertionError("tight loop")
6616 def _schedule_yield(self):
6618 Schedule for a short period of time chosen by the scheduler based
6619 on internal state. Synchronous tasks should call this periodically
6620 in order to allow the scheduler to service pending poll events. The
6621 scheduler will call poll() exactly once, without blocking, and any
6622 resulting poll events will be serviced.
6624 event_handlers = self._poll_event_handlers
6627 if not event_handlers:
6628 return bool(events_handled)
6630 if not self._poll_event_queue:
6634 while event_handlers and self._poll_event_queue:
6635 f, event = self._next_poll_event()
6636 handler, reg_id = event_handlers[f]
6639 except StopIteration:
6642 return bool(events_handled)
6644 def _register(self, f, eventmask, handler):
6647 @return: A unique registration id, for use in schedule() or
6650 if f in self._poll_event_handlers:
6651 raise AssertionError("fd %d is already registered" % f)
6652 self._event_handler_id += 1
6653 reg_id = self._event_handler_id
6654 self._poll_event_handler_ids[reg_id] = f
6655 self._poll_event_handlers[f] = (handler, reg_id)
6656 self._poll_obj.register(f, eventmask)
6659 def _unregister(self, reg_id):
6660 f = self._poll_event_handler_ids[reg_id]
6661 self._poll_obj.unregister(f)
6662 del self._poll_event_handlers[f]
6663 del self._poll_event_handler_ids[reg_id]
6665 def _schedule_wait(self, wait_ids):
6667 Schedule until wait_id is not longer registered
6670 @param wait_id: a task id to wait for
6672 event_handlers = self._poll_event_handlers
6673 handler_ids = self._poll_event_handler_ids
6674 event_handled = False
6676 if isinstance(wait_ids, int):
6677 wait_ids = frozenset([wait_ids])
6680 while wait_ids.intersection(handler_ids):
6681 f, event = self._next_poll_event()
6682 handler, reg_id = event_handlers[f]
6684 event_handled = True
6685 except StopIteration:
6686 event_handled = True
6688 return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.

	NOTE(review): this copy of the file was truncated (missing method
	headers and returns); the structure below was restored to match the
	visible statements.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
			max_jobs = 1

		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._queues = []
		self._schedule_listeners = []

	def add(self, q):
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	def run(self):
		# Keep scheduling until no queue can start more tasks, then wait
		# for the running jobs to drain.
		while self._schedule():
			self._schedule_wait()

		while self._running_job_count():
			self._schedule_wait()

	def _schedule_tasks(self):
		"""
		@rtype: bool
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			if n < 1:
				break

			if not self._start_next_job(n):
				return False

		for q in self._queues:
			if q:
				return True
		return False

	def _running_job_count(self):
		# Recount from the queues and cache in self._jobs.
		job_count = 0
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count
		return job_count

	def _start_next_job(self, n=1):
		# Ask each queue to schedule work until n new jobs have started.
		started_count = 0
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			q.schedule()
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
				break
		return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchronousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Delegate the actual scheduling to a QueueScheduler that drives
		# a single SequentialTaskQueue.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)
class Scheduler(PollScheduler):

	# Options for which _find_blockers_with_lock() skips the blocker
	# search (see that method below).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that keep build output on stdio instead of enabling
	# background/log mode (used by _background_mode()).
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options under which an emerge restart (after portage merges a new
	# version of itself) is never performed (see _restart_if_necessary()).
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that must not leak into an automatic --resume invocation
	# via EMERGE_DEFAULT_OPTS (see _restart_if_necessary()).
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file used to serialize all parallel-fetch output.
	# NOTE(review): _emerge_log_dir is defined elsewhere in this module.
	_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
6802 class _iface_class(SlotObject):
6803 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
6804 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
6805 "scheduleSetup", "scheduleUnpack", "scheduleYield",
6808 class _fetch_iface_class(SlotObject):
6809 __slots__ = ("log_file", "schedule")
6811 _task_queues_class = slot_dict_class(
6812 ("merge", "jobs", "fetch", "unpack"), prefix="")
	class _build_opts_class(SlotObject):
		# Boolean build options; populated in __init__ from the presence
		# of the corresponding "--option" in myopts.
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		# Boolean binary-package options; populated like _build_opts_class.
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		# curval: packages merged so far; maxval: total packages to merge.
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		# xterm_titles: whether xterm title updates are enabled.
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			# Forward to emergelog(), dropping short_msg when titles are
			# disabled.
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		# Record of one failed merge, kept for the final failure summary.
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
6836 class _ConfigPool(object):
6837 """Interface for a task to temporarily allocate a config
6838 instance from a pool. This allows a task to be constructed
6839 long before the config instance actually becomes needed, like
6840 when prefetchers are constructed for the whole merge list."""
6841 __slots__ = ("_root", "_allocate", "_deallocate")
6842 def __init__(self, root, allocate, deallocate):
6844 self._allocate = allocate
6845 self._deallocate = deallocate
6847 return self._allocate(self._root)
6848 def deallocate(self, settings):
6849 self._deallocate(settings)
6851 class _unknown_internal_error(portage.exception.PortageException):
6853 Used internally to terminate scheduling. The specific reason for
6854 the failure should have been dumped to stderr.
6856 def __init__(self, value=""):
6857 portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		"""
		Build the merge scheduler state from the resolver results.

		NOTE(review): several statements of this constructor appear to be
		missing from this copy of the file (e.g. a probable self.trees
		assignment, a "for root in trees:" header, try/except around the
		fetch-log open, and closing parentheses on two writemsg calls);
		the code is preserved as found, with the gaps flagged below.
		"""
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		# NOTE(review): self.trees is read by later methods but its
		# assignment is not visible here — likely lost to truncation.
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Map each *_opts_class slot to the presence of the corresponding
		# "--option" in myopts.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
		# NOTE(review): the default-assignment body of this branch is
		# missing; preserved as found.
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		# NOTE(review): the body of this debug branch is missing;
		# preserved as found.
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		# NOTE(review): the enclosing "for root in trees:" header for the
		# next two statements is missing; preserved as found.
		self._config_pool[root] = []
		self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# Weak refs so finished prefetchers can be garbage collected.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				# NOTE(review): the closing "noiselevel=-1)" argument lines
				# of the next two writemsg calls are missing; preserved.
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
			# NOTE(review): the try: header for the open below is missing;
			# preserved as found.
				open(self._fetch_log, 'w')
			except EnvironmentError:

		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		# NOTE(review): a guard (likely "if portage_match:") is missing
		# before the pop below; preserved as found.
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
	def _poll(self, timeout=None):
		# Delegate polling to the base class event loop.
		# NOTE(review): a statement appears to be missing before this call
		# (this copy of the file looks truncated here) — verify upstream.
		PollScheduler._poll(self, timeout=timeout)
7008 def _set_max_jobs(self, max_jobs):
7009 self._max_jobs = max_jobs
7010 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		# NOTE(review): docstring delimiters, an enclosing guard for the
		# interactive handling, part of the status-display assignment and
		# the final "return background" appear to be missing from this
		# copy of the file; preserved as found.
		Check if background mode is enabled and adjust states as necessary.
		@returns: True if background mode is enabled, False otherwise.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			# Interactive packages need stdio, so force foreground output.
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				pkg_str += " for " + pkg.root
			# NOTE(review): msg is referenced below but its construction
			# is not visible here.
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				# Parallel jobs would interleave with interactive I/O.
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)

		self._status_display.quiet = \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		# Return mergelist packages whose PROPERTIES include "interactive".
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			# NOTE(review): a continue and a try: header appear to be
			# missing here; preserved as found.
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				# A malformed PROPERTIES string aborts scheduling entirely.
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		# Store the dependency graph and derive scheduling metadata from
		# it. With --nodeps or a single job there is nothing to track.
		# NOTE(review): a return after the None assignment appears to be
		# missing from this copy of the file; preserved as found.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# There is no need to track the graph in this case.
			self._digraph = None

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
7087 def _find_system_deps(self):
7089 Find system packages and their deep runtime dependencies. Before being
7090 merged, these packages go to merge_wait_queue, to be merged when no
7091 other packages are building.
7093 deep_system_deps = self._deep_system_deps
7094 deep_system_deps.clear()
7095 deep_system_deps.update(
7096 _find_deep_system_runtime_deps(self._digraph))
7097 deep_system_deps.difference_update([pkg for pkg in \
7098 deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		# NOTE(review): docstring delimiters and the enclosing retry loop
		# (a break after the emptiness check is implied) appear to be
		# missing from this copy of the file; preserved as found.
		Prune any root nodes that are irrelevant.
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		for node in graph.root_nodes():
			# Root nodes that are installed no-ops or already done can
			# be dropped from the graph.
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		graph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		# NOTE(review): docstring delimiters, the cpv_map initialization,
		# a continue, and an else: around the earlier_pkg loop appear to
		# be missing from this copy of the file; preserved as found.
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of it's build dir locks from
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Serialize same-cpv merges by adding buildtime edges.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		# NOTE(review): docstring delimiters, a default status attribute
		# and a guard around pargs appear to be missing from this copy of
		# the file; preserved as found.
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# First positional argument carries the failure status code.
			self.status = pargs[0]
7155 def _schedule_fetch(self, fetcher):
7157 Schedule a fetcher on the fetch queue, in order to
7158 serialize access to the fetch log.
7160 self._task_queues.fetch.addFront(fetcher)
7162 def _schedule_setup(self, setup_phase):
7164 Schedule a setup phase on the merge queue, in order to
7165 serialize unsandboxed access to the live filesystem.
7167 self._task_queues.merge.addFront(setup_phase)
7170 def _schedule_unpack(self, unpack_phase):
7172 Schedule an unpack phase on the unpack queue, in order
7173 to serialize $DISTDIR access for live ebuilds.
7175 self._task_queues.unpack.add(unpack_phase)
	def _find_blockers(self, new_pkg):
		# NOTE(review): docstring delimiters and the closure wrapper that
		# the text below implies (it says this *returns a callable*)
		# appear to be missing from this copy of the file; preserved.
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		# Collect dblink objects for installed packages blocked by
		# new_pkg.
		# NOTE(review): early returns, the gc.collect() call and continue
		# statements appear to be missing from this copy of the file;
		# preserved as found.
		if self._opts_ignore_blockers.intersection(self.myopts):

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with some python versions).
		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# Packages in the same slot or with the same cpv are being
			# replaced rather than blocked.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		return blocker_dblinks
7214 def _dblink_pkg(self, pkg_dblink):
7215 cpv = pkg_dblink.mycpv
7216 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
7217 root_config = self.trees[pkg_dblink.myroot]["root_config"]
7218 installed = type_name == "installed"
7219 return self._pkg(cpv, type_name, root_config, installed=installed)
	def _append_to_log_path(self, log_path, msg):
		# Append msg to the build log at log_path.
		# NOTE(review): the write and close (likely inside try/finally)
		# appear to be missing from this copy of the file; preserved.
		f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		# Route elog messages from a dblink so that background mode can
		# redirect them into the package's build log.
		# NOTE(review): the out/err setup, the loop over msgs and the
		# log_file close appear to be missing from this copy of the file;
		# preserved as found (msg and out are referenced but not built
		# here).
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")

		background = self._background

		if background and log_path is not None:
			log_file = open(log_path, 'a')

			func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

		if log_file is not None:
7246 def _dblink_emerge_log(self, msg):
7247 self._logger.log(msg)
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		# Display a merge-time message on behalf of a dblink, honoring
		# background mode by appending to the build log when available.
		# NOTE(review): an else:/guard around the second writemsg_level
		# appears to be missing from this copy of the file; preserved.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			# No log file: write to the terminal unless background mode
			# suppresses low-priority messages.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
			self._append_to_log_path(log_path, msg)
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		# NOTE(review): docstring delimiters and (apparently) a wait on
		# the started phase appear to be missing from this copy of the
		# file; returncode is read right after start(). Preserved as
		# found — verify against upstream.
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()

		return ebuild_phase.returncode
	def _generate_digests(self):
		# NOTE(review): docstring delimiters, continue/return statements
		# and the writemsg_level call headers for the two error messages
		# appear to be missing from this copy of the file; preserved as
		# found.
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must done before parallel
		if '--fetchonly' in self.myopts:

		digest = '--digest' in self.myopts
		for pkgsettings in self.pkgsettings.itervalues():
			if 'digest' in pkgsettings.features:

		# Only ebuilds that are being merged need digests.
		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
			pkgsettings = self.pkgsettings[x.root]
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv)
				"!!! Could not locate ebuild for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not portage.digestgen([], pkgsettings, myportdb=portdb):
				"!!! Unable to generate manifest for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		# NOTE(review): the early return, the quiet_settings dict
		# initialization, continue statements and the failure return
		# appear to be missing from this copy of the file; preserved.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:

		shown_verifying_msg = False
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			# Build a quiet clone of each config so digestcheck output
			# does not flood the display.
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
	def _add_prefetchers(self):
		# Spawn background fetch tasks for the merge list.
		# NOTE(review): a return in the guard below appears to be missing
		# from this copy of the file; preserved as found.
		if not self._parallel_fetch:

		if self._parallel_fetch:
			self._status_msg("Starting parallel fetch")

			prefetchers = self._prefetchers
			getbinpkg = "--getbinpkg" in self.myopts

			# In order to avoid "waiting for lock" messages
			# at the beginning, which annoy users, never
			# spawn a prefetcher for the first package.
			for pkg in self._mergelist[1:]:
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					self._task_queues.fetch.add(prefetcher)
					prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		# NOTE(review): docstring delimiters, the default assignment
		# (presumably prefetcher = None) and the final return appear to
		# be missing from this copy of the file; preserved as found.
		@return: a prefetcher, or None if not applicable
		if not isinstance(pkg, Package):

		elif pkg.type_name == "ebuild":
			# Fetch-only EbuildFetcher borrowing a config from the pool.
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			# Remote binary packages are prefetched from the binhost.
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)
	def _is_restart_scheduled(self):
		# NOTE(review): docstring delimiters and the True/False returns
		# appear to be missing from this copy of the file; preserved.
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@returns: True if a restart is scheduled, False otherwise.
		if self._opts_no_restart.intersection(self.myopts):

		mergelist = self._mergelist

		for i, pkg in enumerate(mergelist):
			# A restart only makes sense when something follows the
			# portage upgrade in the list.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
	def _is_restart_necessary(self, pkg):
		# NOTE(review): docstring delimiters and the fallback returns
		# appear to be missing from this copy of the file; preserved.
		@return: True if merging the given package
		requires restart, False otherwise.

		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Restart only when the version actually changes.
				return pkg.cpv != self._running_portage.cpv
	def _restart_if_necessary(self, pkg):
		# NOTE(review): docstring delimiters, the guard-return bodies, the
		# closing argument of one logger.log() call and the if/else
		# around the two mynewargv.append calls appear to be missing from
		# this copy of the file; preserved as found.
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.

		if self._opts_no_restart.intersection(self.myopts):

		if not self._is_restart_necessary(pkg):

		if pkg == self._mergelist[-1]:

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \

		mtimedb["resume"]["mergelist"].remove(list(pkg))
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				mynewargv.append(myopt)
				mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
7497 if "--resume" in self.myopts:
7499 portage.writemsg_stdout(
7500 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
7501 self._logger.log(" *** Resuming merge...")
7503 self._save_resume_list()
7506 self._background = self._background_mode()
7507 except self._unknown_internal_error:
7510 for root in self.trees:
7511 root_config = self.trees[root]["root_config"]
7513 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
7514 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
7515 # for ensuring sane $PWD (bug #239560) and storing elog messages.
7516 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
7517 if not tmpdir or not os.path.isdir(tmpdir):
7518 msg = "The directory specified in your " + \
7519 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
7520 "does not exist. Please create this " + \
7521 "directory or correct your PORTAGE_TMPDIR setting."
7522 msg = textwrap.wrap(msg, 70)
7523 out = portage.output.EOutput()
7528 if self._background:
7529 root_config.settings.unlock()
7530 root_config.settings["PORTAGE_BACKGROUND"] = "1"
7531 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
7532 root_config.settings.lock()
7534 self.pkgsettings[root] = portage.config(
7535 clone=root_config.settings)
7537 rval = self._generate_digests()
7538 if rval != os.EX_OK:
7541 rval = self._check_manifests()
7542 if rval != os.EX_OK:
7545 keep_going = "--keep-going" in self.myopts
7546 fetchonly = self._build_opts.fetchonly
7547 mtimedb = self._mtimedb
7548 failed_pkgs = self._failed_pkgs
7551 rval = self._merge()
7552 if rval == os.EX_OK or fetchonly or not keep_going:
7554 if "resume" not in mtimedb:
7556 mergelist = self._mtimedb["resume"].get("mergelist")
7563 for failed_pkg in failed_pkgs:
7564 mergelist.remove(list(failed_pkg.pkg))
7566 self._failed_pkgs_all.extend(failed_pkgs)
7572 if not self._calc_resume_list():
7575 clear_caches(self.trees)
7576 if not self._mergelist:
7579 self._save_resume_list()
7580 self._pkg_count.curval = 0
7581 self._pkg_count.maxval = len([x for x in self._mergelist \
7582 if isinstance(x, Package) and x.operation == "merge"])
7583 self._status_display.maxval = self._pkg_count.maxval
7585 self._logger.log(" *** Finished. Cleaning up...")
7588 self._failed_pkgs_all.extend(failed_pkgs)
7591 background = self._background
7592 failure_log_shown = False
7593 if background and len(self._failed_pkgs_all) == 1:
7594 # If only one package failed then just show it's
7595 # whole log for easy viewing.
7596 failed_pkg = self._failed_pkgs_all[-1]
7597 build_dir = failed_pkg.build_dir
7600 log_paths = [failed_pkg.build_log]
7602 log_path = self._locate_failure_log(failed_pkg)
7603 if log_path is not None:
7605 log_file = open(log_path)
7609 if log_file is not None:
7611 for line in log_file:
7612 writemsg_level(line, noiselevel=-1)
7615 failure_log_shown = True
7617 # Dump mod_echo output now since it tends to flood the terminal.
7618 # This allows us to avoid having more important output, generated
7619 # later, from being swept away by the mod_echo output.
7620 mod_echo_output = _flush_elog_mod_echo()
7622 if background and not failure_log_shown and \
7623 self._failed_pkgs_all and \
7624 self._failed_pkgs_die_msgs and \
7625 not mod_echo_output:
7627 printer = portage.output.EOutput()
7628 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
7630 if mysettings["ROOT"] != "/":
7631 root_msg = " merged to %s" % mysettings["ROOT"]
7633 printer.einfo("Error messages for package %s%s:" % \
7634 (colorize("INFORM", key), root_msg))
7636 for phase in portage.const.EBUILD_PHASES:
7637 if phase not in logentries:
7639 for msgtype, msgcontent in logentries[phase]:
7640 if isinstance(msgcontent, basestring):
7641 msgcontent = [msgcontent]
7642 for line in msgcontent:
7643 printer.eerror(line.strip("\n"))
7645 if self._post_mod_echo_msgs:
7646 for msg in self._post_mod_echo_msgs:
7649 if len(self._failed_pkgs_all) > 1 or \
7650 (self._failed_pkgs_all and "--keep-going" in self.myopts):
7651 if len(self._failed_pkgs_all) > 1:
7652 msg = "The following %d packages have " % \
7653 len(self._failed_pkgs_all) + \
7654 "failed to build or install:"
7656 msg = "The following package has " + \
7657 "failed to build or install:"
7659 writemsg(prefix + "\n", noiselevel=-1)
7660 from textwrap import wrap
7661 for line in wrap(msg, 72):
7662 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
7663 writemsg(prefix + "\n", noiselevel=-1)
7664 for failed_pkg in self._failed_pkgs_all:
7665 writemsg("%s\t%s\n" % (prefix,
7666 colorize("INFORM", str(failed_pkg.pkg))),
7668 writemsg(prefix + "\n", noiselevel=-1)
7672 def _elog_listener(self, mysettings, key, logentries, fulltext):
# elog listener installed for the duration of the merge loop: it collects
# ERROR-level log entries per package so they can be re-displayed to the
# user after the merge run finishes.
7673 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
# NOTE(review): original line 7674 is absent from this excerpt; the append
# below is presumably guarded (e.g. "if errors:") so that packages without
# ERROR entries are not recorded -- confirm against the full file.
7675 self._failed_pkgs_die_msgs.append(
7676 (mysettings, key, errors))
7678 def _locate_failure_log(self, failed_pkg):
7680 build_dir = failed_pkg.build_dir
7683 log_paths = [failed_pkg.build_log]
7685 for log_path in log_paths:
7690 log_size = os.stat(log_path).st_size
7701 def _add_packages(self):
# Seed the scheduler's package queue with every Package instance from the
# computed merge list.  Blocker entries are recognized but carry no work
# of their own at this stage.
7702 pkg_queue = self._pkg_queue
7703 for pkg in self._mergelist:
7704 if isinstance(pkg, Package):
7705 pkg_queue.append(pkg)
7706 elif isinstance(pkg, Blocker):
# NOTE(review): the body of this elif (original line 7707) is not in the
# excerpt; it is presumably a no-op "pass" -- confirm against the full file.
7709 def _system_merge_started(self, merge):
7711 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
7713 graph = self._digraph
7716 pkg = merge.merge.pkg
7718 # Skip this if $ROOT != / since it shouldn't matter if there
7719 # are unsatisfied system runtime deps in this case.
7723 completed_tasks = self._completed_tasks
7724 unsatisfied = self._unsatisfied_system_deps
7726 def ignore_non_runtime_or_satisfied(priority):
7728 Ignore non-runtime and satisfied runtime priorities.
7730 if isinstance(priority, DepPriority) and \
7731 not priority.satisfied and \
7732 (priority.runtime or priority.runtime_post):
7736 # When checking for unsatisfied runtime deps, only check
7737 # direct deps since indirect deps are checked when the
7738 # corresponding parent is merged.
7739 for child in graph.child_nodes(pkg,
7740 ignore_priority=ignore_non_runtime_or_satisfied):
7741 if not isinstance(child, Package) or \
7742 child.operation == 'uninstall':
7746 if child.operation == 'merge' and \
7747 child not in completed_tasks:
7748 unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
	"""Exit listener for merges scheduled through the wait queue.

	Drops the finished task from the list of scheduled wait-queue merges
	and then runs the ordinary merge-exit bookkeeping for it.
	"""
	waiting = self._merge_wait_scheduled
	waiting.remove(task)
	self._merge_exit(task)
7754 def _merge_exit(self, merge):
# Exit handler for a PackageMerge task: perform success/failure
# bookkeeping, return the task's config instance to the per-ROOT pool,
# and refresh the status display counters.
7755 self._do_merge_exit(merge)
7756 self._deallocate_config(merge.merge.settings)
# Only count the package toward the merge total when the merge succeeded
# and the package was not already installed.
7757 if merge.returncode == os.EX_OK and \
7758 not merge.merge.pkg.installed:
7759 self._status_display.curval += 1
7760 self._status_display.merges = len(self._task_queues.merge)
# NOTE(review): this excerpt skips original lines 7761-7762; the method
# likely continues (e.g. triggering another scheduling pass) -- confirm
# against the full file.
7763 def _do_merge_exit(self, merge):
7764 pkg = merge.merge.pkg
7765 if merge.returncode != os.EX_OK:
7766 settings = merge.merge.settings
7767 build_dir = settings.get("PORTAGE_BUILDDIR")
7768 build_log = settings.get("PORTAGE_LOG_FILE")
7770 self._failed_pkgs.append(self._failed_pkg(
7771 build_dir=build_dir, build_log=build_log,
7773 returncode=merge.returncode))
7774 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
7776 self._status_display.failed = len(self._failed_pkgs)
7779 self._task_complete(pkg)
7780 pkg_to_replace = merge.merge.pkg_to_replace
7781 if pkg_to_replace is not None:
7782 # When a package is replaced, mark it's uninstall
7783 # task complete (if any).
7785 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
7786 self._task_complete(uninst_hash_key)
7791 self._restart_if_necessary(pkg)
7793 # Call mtimedb.commit() after each merge so that
7794 # --resume still works after being interrupted
7795 # by reboot, sigkill or similar.
7796 mtimedb = self._mtimedb
7797 mtimedb["resume"]["mergelist"].remove(list(pkg))
7798 if not mtimedb["resume"]["mergelist"]:
7799 del mtimedb["resume"]
7802 def _build_exit(self, build):
7803 if build.returncode == os.EX_OK:
7805 merge = PackageMerge(merge=build)
7806 if not build.build_opts.buildpkgonly and \
7807 build.pkg in self._deep_system_deps:
7808 # Since dependencies on system packages are frequently
7809 # unspecified, merge them only when no builds are executing.
7810 self._merge_wait_queue.append(merge)
7811 merge.addStartListener(self._system_merge_started)
7813 merge.addExitListener(self._merge_exit)
7814 self._task_queues.merge.add(merge)
7815 self._status_display.merges = len(self._task_queues.merge)
7817 settings = build.settings
7818 build_dir = settings.get("PORTAGE_BUILDDIR")
7819 build_log = settings.get("PORTAGE_LOG_FILE")
7821 self._failed_pkgs.append(self._failed_pkg(
7822 build_dir=build_dir, build_log=build_log,
7824 returncode=build.returncode))
7825 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
7827 self._status_display.failed = len(self._failed_pkgs)
7828 self._deallocate_config(build.settings)
7830 self._status_display.running = self._jobs
def _extract_exit(self, build):
	"""Exit listener for binary-package extraction tasks.

	An extraction result is handled exactly like a build result, so this
	simply delegates to the build exit handler.
	"""
	self._build_exit(build)
def _task_complete(self, pkg):
	"""Record that the task for the given package has finished.

	Marks the package as completed, removes it from the set of
	unsatisfied system deps, and clears the early-return flag so that
	package selection is attempted again on the next scheduling pass.
	"""
	self._completed_tasks.add(pkg)
	self._unsatisfied_system_deps.discard(pkg)
	# A finished task may unblock package selection again.
	self._choose_pkg_return_early = False
7843 self._add_prefetchers()
7844 self._add_packages()
7845 pkg_queue = self._pkg_queue
7846 failed_pkgs = self._failed_pkgs
7847 portage.locks._quiet = self._background
7848 portage.elog._emerge_elog_listener = self._elog_listener
7854 self._main_loop_cleanup()
7855 portage.locks._quiet = False
7856 portage.elog._emerge_elog_listener = None
7858 rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
	"""Reset all per-run scheduling state after the main loop finishes."""
	# Empty the queue in place so outside references stay valid.
	self._pkg_queue[:] = []
	# All three tracking sets are flushed the same way.
	for tracked in (self._completed_tasks,
		self._deep_system_deps,
		self._unsatisfied_system_deps):
		tracked.clear()
	self._choose_pkg_return_early = False
	self._status_display.reset()
	self._digraph = None
	self._task_queues.fetch.clear()
7872 def _choose_pkg(self):
7874 Choose a task that has all it's dependencies satisfied.
7877 if self._choose_pkg_return_early:
7880 if self._digraph is None:
7881 if (self._jobs or self._task_queues.merge) and \
7882 not ("--nodeps" in self.myopts and \
7883 (self._max_jobs is True or self._max_jobs > 1)):
7884 self._choose_pkg_return_early = True
7886 return self._pkg_queue.pop(0)
7888 if not (self._jobs or self._task_queues.merge):
7889 return self._pkg_queue.pop(0)
7891 self._prune_digraph()
7894 later = set(self._pkg_queue)
7895 for pkg in self._pkg_queue:
7897 if not self._dependent_on_scheduled_merges(pkg, later):
7901 if chosen_pkg is not None:
7902 self._pkg_queue.remove(chosen_pkg)
7904 if chosen_pkg is None:
7905 # There's no point in searching for a package to
7906 # choose until at least one of the existing jobs
7908 self._choose_pkg_return_early = True
7912 def _dependent_on_scheduled_merges(self, pkg, later):
7914 Traverse the subgraph of the given packages deep dependencies
7915 to see if it contains any scheduled merges.
7916 @param pkg: a package to check dependencies for
7918 @param later: packages for which dependence should be ignored
7919 since they will be merged later than pkg anyway and therefore
7920 delaying the merge of pkg will not result in a more optimal
7924 @returns: True if the package is dependent, False otherwise.
7927 graph = self._digraph
7928 completed_tasks = self._completed_tasks
7931 traversed_nodes = set([pkg])
7932 direct_deps = graph.child_nodes(pkg)
7933 node_stack = direct_deps
7934 direct_deps = frozenset(direct_deps)
7936 node = node_stack.pop()
7937 if node in traversed_nodes:
7939 traversed_nodes.add(node)
7940 if not ((node.installed and node.operation == "nomerge") or \
7941 (node.operation == "uninstall" and \
7942 node not in direct_deps) or \
7943 node in completed_tasks or \
7947 node_stack.extend(graph.child_nodes(node))
7951 def _allocate_config(self, root):
7953 Allocate a unique config instance for a task in order
7954 to prevent interference between parallel tasks.
7956 if self._config_pool[root]:
7957 temp_settings = self._config_pool[root].pop()
7959 temp_settings = portage.config(clone=self.pkgsettings[root])
7960 # Since config.setcpv() isn't guaranteed to call config.reset() due to
7961 # performance reasons, call it here to make sure all settings from the
7962 # previous package get flushed out (such as PORTAGE_LOG_FILE).
7963 temp_settings.reload()
7964 temp_settings.reset()
7965 return temp_settings
def _deallocate_config(self, settings):
	"""Return a settings instance to the per-ROOT config pool for reuse."""
	root = settings["ROOT"]
	self._config_pool[root].append(settings)
7970 def _main_loop(self):
7972 # Only allow 1 job max if a restart is scheduled
7973 # due to portage update.
7974 if self._is_restart_scheduled() or \
7975 self._opts_no_background.intersection(self.myopts):
7976 self._set_max_jobs(1)
7978 merge_queue = self._task_queues.merge
7980 while self._schedule():
7981 if self._poll_event_handlers:
7986 if not (self._jobs or merge_queue):
7988 if self._poll_event_handlers:
def _keep_scheduling(self):
	"""Return True while it still makes sense to schedule new tasks.

	Scheduling stops when the package queue is exhausted, or when a
	package has failed -- unless this is a fetch-only run, in which case
	failures do not abort the remaining downloads.
	"""
	if not self._pkg_queue:
		return False
	if self._failed_pkgs and not self._build_opts.fetchonly:
		return False
	return True
7995 def _schedule_tasks(self):
7997 # When the number of jobs drops to zero, process all waiting merges.
7998 if not self._jobs and self._merge_wait_queue:
7999 for task in self._merge_wait_queue:
8000 task.addExitListener(self._merge_wait_exit_handler)
8001 self._task_queues.merge.add(task)
8002 self._status_display.merges = len(self._task_queues.merge)
8003 self._merge_wait_scheduled.extend(self._merge_wait_queue)
8004 del self._merge_wait_queue[:]
8006 self._schedule_tasks_imp()
8007 self._status_display.display()
8010 for q in self._task_queues.values():
8014 # Cancel prefetchers if they're the only reason
8015 # the main poll loop is still running.
8016 if self._failed_pkgs and not self._build_opts.fetchonly and \
8017 not (self._jobs or self._task_queues.merge) and \
8018 self._task_queues.fetch:
8019 self._task_queues.fetch.clear()
8023 self._schedule_tasks_imp()
8024 self._status_display.display()
8026 return self._keep_scheduling()
8028 def _job_delay(self):
8031 @returns: True if job scheduling should be delayed, False otherwise.
8034 if self._jobs and self._max_load is not None:
8036 current_time = time.time()
8038 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
8039 if delay > self._job_delay_max:
8040 delay = self._job_delay_max
8041 if (current_time - self._previous_job_start_time) < delay:
8046 def _schedule_tasks_imp(self):
8049 @returns: True if state changed, False otherwise.
8056 if not self._keep_scheduling():
8057 return bool(state_change)
8059 if self._choose_pkg_return_early or \
8060 self._merge_wait_scheduled or \
8061 (self._jobs and self._unsatisfied_system_deps) or \
8062 not self._can_add_job() or \
8064 return bool(state_change)
8066 pkg = self._choose_pkg()
8068 return bool(state_change)
8072 if not pkg.installed:
8073 self._pkg_count.curval += 1
8075 task = self._task(pkg)
8078 merge = PackageMerge(merge=task)
8079 merge.addExitListener(self._merge_exit)
8080 self._task_queues.merge.add(merge)
8084 self._previous_job_start_time = time.time()
8085 self._status_display.running = self._jobs
8086 task.addExitListener(self._extract_exit)
8087 self._task_queues.jobs.add(task)
8091 self._previous_job_start_time = time.time()
8092 self._status_display.running = self._jobs
8093 task.addExitListener(self._build_exit)
8094 self._task_queues.jobs.add(task)
8096 return bool(state_change)
8098 def _task(self, pkg):
8100 pkg_to_replace = None
8101 if pkg.operation != "uninstall":
8102 vardb = pkg.root_config.trees["vartree"].dbapi
8103 previous_cpv = vardb.match(pkg.slot_atom)
8105 previous_cpv = previous_cpv.pop()
8106 pkg_to_replace = self._pkg(previous_cpv,
8107 "installed", pkg.root_config, installed=True)
8109 task = MergeListItem(args_set=self._args_set,
8110 background=self._background, binpkg_opts=self._binpkg_opts,
8111 build_opts=self._build_opts,
8112 config_pool=self._ConfigPool(pkg.root,
8113 self._allocate_config, self._deallocate_config),
8114 emerge_opts=self.myopts,
8115 find_blockers=self._find_blockers(pkg), logger=self._logger,
8116 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
8117 pkg_to_replace=pkg_to_replace,
8118 prefetcher=self._prefetchers.get(pkg),
8119 scheduler=self._sched_iface,
8120 settings=self._allocate_config(pkg.root),
8121 statusMessage=self._status_msg,
8122 world_atom=self._world_atom)
8126 def _failed_pkg_msg(self, failed_pkg, action, preposition):
8127 pkg = failed_pkg.pkg
8128 msg = "%s to %s %s" % \
8129 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
8131 msg += " %s %s" % (preposition, pkg.root)
8133 log_path = self._locate_failure_log(failed_pkg)
8134 if log_path is not None:
8135 msg += ", Log file:"
8136 self._status_msg(msg)
8138 if log_path is not None:
8139 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
8141 def _status_msg(self, msg):
8143 Display a brief status message (no newlines) in the status display.
8144 This is called by tasks to provide feedback to the user. This
8145 delegates the resposibility of generating \r and \n control characters,
8146 to guarantee that lines are created or erased when necessary and
8150 @param msg: a brief status message (no newlines allowed)
8152 if not self._background:
8153 writemsg_level("\n")
8154 self._status_display.displayMessage(msg)
8156 def _save_resume_list(self):
8158 Do this before verifying the ebuild Manifests since it might
8159 be possible for the user to use --resume --skipfirst get past
8160 a non-essential package with a broken digest.
8162 mtimedb = self._mtimedb
8163 mtimedb["resume"]["mergelist"] = [list(x) \
8164 for x in self._mergelist \
8165 if isinstance(x, Package) and x.operation == "merge"]
8169 def _calc_resume_list(self):
8171 Use the current resume list to calculate a new one,
8172 dropping any packages with unsatisfied deps.
8174 @returns: True if successful, False otherwise.
8176 print colorize("GOOD", "*** Resuming merge...")
8178 if self._show_list():
8179 if "--tree" in self.myopts:
8180 portage.writemsg_stdout("\n" + \
8181 darkgreen("These are the packages that " + \
8182 "would be merged, in reverse order:\n\n"))
8185 portage.writemsg_stdout("\n" + \
8186 darkgreen("These are the packages that " + \
8187 "would be merged, in order:\n\n"))
8189 show_spinner = "--quiet" not in self.myopts and \
8190 "--nodeps" not in self.myopts
8193 print "Calculating dependencies ",
8195 myparams = create_depgraph_params(self.myopts, None)
8199 success, mydepgraph, dropped_tasks = resume_depgraph(
8200 self.settings, self.trees, self._mtimedb, self.myopts,
8201 myparams, self._spinner)
8202 except depgraph.UnsatisfiedResumeDep, exc:
8203 # rename variable to avoid python-3.0 error:
8204 # SyntaxError: can not delete variable 'e' referenced in nested
8207 mydepgraph = e.depgraph
8208 dropped_tasks = set()
8211 print "\b\b... done!"
8214 def unsatisfied_resume_dep_msg():
8215 mydepgraph.display_problems()
8216 out = portage.output.EOutput()
8217 out.eerror("One or more packages are either masked or " + \
8218 "have missing dependencies:")
8221 show_parents = set()
8223 if dep.parent in show_parents:
8225 show_parents.add(dep.parent)
8226 if dep.atom is None:
8227 out.eerror(indent + "Masked package:")
8228 out.eerror(2 * indent + str(dep.parent))
8231 out.eerror(indent + str(dep.atom) + " pulled in by:")
8232 out.eerror(2 * indent + str(dep.parent))
8234 msg = "The resume list contains packages " + \
8235 "that are either masked or have " + \
8236 "unsatisfied dependencies. " + \
8237 "Please restart/continue " + \
8238 "the operation manually, or use --skipfirst " + \
8239 "to skip the first package in the list and " + \
8240 "any other packages that may be " + \
8241 "masked or have missing dependencies."
8242 for line in textwrap.wrap(msg, 72):
8244 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
8247 if success and self._show_list():
8248 mylist = mydepgraph.altlist()
8250 if "--tree" in self.myopts:
8252 mydepgraph.display(mylist, favorites=self._favorites)
8255 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
8257 mydepgraph.display_problems()
8259 mylist = mydepgraph.altlist()
8260 mydepgraph.break_refs(mylist)
8261 mydepgraph.break_refs(dropped_tasks)
8262 self._mergelist = mylist
8263 self._set_digraph(mydepgraph.schedulerGraph())
8266 for task in dropped_tasks:
8267 if not (isinstance(task, Package) and task.operation == "merge"):
8270 msg = "emerge --keep-going:" + \
8273 msg += " for %s" % (pkg.root,)
8274 msg += " dropped due to unsatisfied dependency."
8275 for line in textwrap.wrap(msg, msg_width):
8276 eerror(line, phase="other", key=pkg.cpv)
8277 settings = self.pkgsettings[pkg.root]
8278 # Ensure that log collection from $T is disabled inside
8279 # elog_process(), since any logs that might exist are
8281 settings.pop("T", None)
8282 portage.elog.elog_process(pkg.cpv, settings)
8283 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
8287 def _show_list(self):
8288 myopts = self.myopts
8289 if "--quiet" not in myopts and \
8290 ("--ask" in myopts or "--tree" in myopts or \
8291 "--verbose" in myopts):
8295 def _world_atom(self, pkg):
8297 Add the package to the world file, but only if
8298 it's supposed to be added. Otherwise, do nothing.
8301 if set(("--buildpkgonly", "--fetchonly",
8303 "--oneshot", "--onlydeps",
8304 "--pretend")).intersection(self.myopts):
8307 if pkg.root != self.target_root:
8310 args_set = self._args_set
8311 if not args_set.findAtomForPackage(pkg):
8314 logger = self._logger
8315 pkg_count = self._pkg_count
8316 root_config = pkg.root_config
8317 world_set = root_config.sets["world"]
8318 world_locked = False
8319 if hasattr(world_set, "lock"):
8324 if hasattr(world_set, "load"):
8325 world_set.load() # maybe it's changed on disk
8327 atom = create_world_atom(pkg, args_set, root_config)
8329 if hasattr(world_set, "add"):
8330 self._status_msg(('Recording %s in "world" ' + \
8331 'favorites file...') % atom)
8332 logger.log(" === (%s of %s) Updating world file (%s)" % \
8333 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
8336 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
8337 (atom,), level=logging.WARN, noiselevel=-1)
8342 def _pkg(self, cpv, type_name, root_config, installed=False):
8344 Get a package instance from the cache, or create a new
8345 one if necessary. Raises KeyError from aux_get if it
8346 failures for some reason (package does not exist or is
8351 operation = "nomerge"
8353 if self._digraph is not None:
8354 # Reuse existing instance when available.
8355 pkg = self._digraph.get(
8356 (type_name, root_config.root, cpv, operation))
8360 tree_type = depgraph.pkg_tree_map[type_name]
8361 db = root_config.trees[tree_type].dbapi
8362 db_keys = list(self.trees[root_config.root][
8363 tree_type].dbapi._aux_cache_keys)
8364 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
8365 pkg = Package(cpv=cpv, metadata=metadata,
8366 root_config=root_config, installed=installed)
8367 if type_name == "ebuild":
8368 settings = self.pkgsettings[root_config.root]
8369 settings.setcpv(pkg)
8370 pkg.metadata["USE"] = settings["PORTAGE_USE"]
8371 pkg.metadata['CHOST'] = settings.get('CHOST', '')
8375 class MetadataRegen(PollScheduler):
8377 def __init__(self, portdb, cp_iter=None, consumer=None,
8378 max_jobs=None, max_load=None):
8379 PollScheduler.__init__(self)
8380 self._portdb = portdb
8381 self._global_cleanse = False
8383 cp_iter = self._iter_every_cp()
8384 # We can globally cleanse stale cache only if we
8385 # iterate over every single cp.
8386 self._global_cleanse = True
8387 self._cp_iter = cp_iter
8388 self._consumer = consumer
8390 if max_jobs is None:
8393 self._max_jobs = max_jobs
8394 self._max_load = max_load
8395 self._sched_iface = self._sched_iface_class(
8396 register=self._register,
8397 schedule=self._schedule_wait,
8398 unregister=self._unregister)
8400 self._valid_pkgs = set()
8401 self._cp_set = set()
8402 self._process_iter = self._iter_metadata_processes()
8403 self.returncode = os.EX_OK
8404 self._error_count = 0
8406 def _iter_every_cp(self):
8407 every_cp = self._portdb.cp_all()
8408 every_cp.sort(reverse=True)
8411 yield every_cp.pop()
8415 def _iter_metadata_processes(self):
8416 portdb = self._portdb
8417 valid_pkgs = self._valid_pkgs
8418 cp_set = self._cp_set
8419 consumer = self._consumer
8421 for cp in self._cp_iter:
8423 portage.writemsg_stdout("Processing %s\n" % cp)
8424 cpv_list = portdb.cp_list(cp)
8425 for cpv in cpv_list:
8427 ebuild_path, repo_path = portdb.findname2(cpv)
8428 metadata, st, emtime = portdb._pull_valid_cache(
8429 cpv, ebuild_path, repo_path)
8430 if metadata is not None:
8431 if consumer is not None:
8432 consumer(cpv, ebuild_path,
8433 repo_path, metadata)
8436 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
8437 ebuild_mtime=emtime,
8438 metadata_callback=portdb._metadata_callback,
8439 portdb=portdb, repo_path=repo_path,
8440 settings=portdb.doebuild_settings)
8444 portdb = self._portdb
8445 from portage.cache.cache_errors import CacheError
8448 while self._schedule():
8454 if self._global_cleanse:
8455 for mytree in portdb.porttrees:
8457 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
8458 except CacheError, e:
8459 portage.writemsg("Error listing cache entries for " + \
8460 "'%s': %s, continuing...\n" % (mytree, e),
8466 cp_set = self._cp_set
8467 cpv_getkey = portage.cpv_getkey
8468 for mytree in portdb.porttrees:
8470 dead_nodes[mytree] = set(cpv for cpv in \
8471 portdb.auxdb[mytree].iterkeys() \
8472 if cpv_getkey(cpv) in cp_set)
8473 except CacheError, e:
8474 portage.writemsg("Error listing cache entries for " + \
8475 "'%s': %s, continuing...\n" % (mytree, e),
8482 for y in self._valid_pkgs:
8483 for mytree in portdb.porttrees:
8484 if portdb.findname2(y, mytree=mytree)[0]:
8485 dead_nodes[mytree].discard(y)
8487 for mytree, nodes in dead_nodes.iteritems():
8488 auxdb = portdb.auxdb[mytree]
8492 except (KeyError, CacheError):
8495 def _schedule_tasks(self):
8498 @returns: True if there may be remaining tasks to schedule,
8501 while self._can_add_job():
8503 metadata_process = self._process_iter.next()
8504 except StopIteration:
8508 metadata_process.scheduler = self._sched_iface
8509 metadata_process.addExitListener(self._metadata_exit)
8510 metadata_process.start()
8513 def _metadata_exit(self, metadata_process):
8515 if metadata_process.returncode != os.EX_OK:
8517 self._error_count += 1
8518 self._valid_pkgs.discard(metadata_process.cpv)
8519 portage.writemsg("Error processing %s, continuing...\n" % \
8520 (metadata_process.cpv,), noiselevel=-1)
8522 if self._consumer is not None:
8523 # On failure, still notify the consumer (in this case the metadata
8524 # argument is None).
8525 self._consumer(metadata_process.cpv,
8526 metadata_process.ebuild_path,
8527 metadata_process.repo_path,
8528 metadata_process.metadata)
8532 def unmerge(root_config, myopts, unmerge_action,
8533 unmerge_files, ldpath_mtimes, autoclean=0,
8534 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
8535 scheduler=None, writemsg_level=portage.util.writemsg_level):
8538 clean_world = myopts.get('--deselect') != 'n'
8539 quiet = "--quiet" in myopts
8540 settings = root_config.settings
8541 sets = root_config.sets
8542 vartree = root_config.trees["vartree"]
8543 candidate_catpkgs=[]
8545 xterm_titles = "notitles" not in settings.features
8546 out = portage.output.EOutput()
8548 db_keys = list(vartree.dbapi._aux_cache_keys)
8551 pkg = pkg_cache.get(cpv)
8553 pkg = Package(cpv=cpv, installed=True,
8554 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
8555 root_config=root_config,
8556 type_name="installed")
8557 pkg_cache[cpv] = pkg
8560 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8562 # At least the parent needs to exist for the lock file.
8563 portage.util.ensure_dirs(vdb_path)
8564 except portage.exception.PortageException:
8568 if os.access(vdb_path, os.W_OK):
8569 vdb_lock = portage.locks.lockdir(vdb_path)
8570 realsyslist = sets["system"].getAtoms()
8572 for x in realsyslist:
8573 mycp = portage.dep_getkey(x)
8574 if mycp in settings.getvirtuals():
8576 for provider in settings.getvirtuals()[mycp]:
8577 if vartree.dbapi.match(provider):
8578 providers.append(provider)
8579 if len(providers) == 1:
8580 syslist.extend(providers)
8582 syslist.append(mycp)
8584 mysettings = portage.config(clone=settings)
8586 if not unmerge_files:
8587 if unmerge_action == "unmerge":
8589 print bold("emerge unmerge") + " can only be used with specific package names"
8596 # process all arguments and add all
8597 # valid db entries to candidate_catpkgs
8599 if not unmerge_files:
8600 candidate_catpkgs.extend(vartree.dbapi.cp_all())
8602 #we've got command-line arguments
8603 if not unmerge_files:
8604 print "\nNo packages to unmerge have been provided.\n"
8606 for x in unmerge_files:
8607 arg_parts = x.split('/')
8608 if x[0] not in [".","/"] and \
8609 arg_parts[-1][-7:] != ".ebuild":
8610 #possible cat/pkg or dep; treat as such
8611 candidate_catpkgs.append(x)
8612 elif unmerge_action in ["prune","clean"]:
8613 print "\n!!! Prune and clean do not accept individual" + \
8614 " ebuilds as arguments;\n skipping.\n"
8617 # it appears that the user is specifying an installed
8618 # ebuild and we're in "unmerge" mode, so it's ok.
8619 if not os.path.exists(x):
8620 print "\n!!! The path '"+x+"' doesn't exist.\n"
8623 absx = os.path.abspath(x)
8624 sp_absx = absx.split("/")
8625 if sp_absx[-1][-7:] == ".ebuild":
8627 absx = "/".join(sp_absx)
8629 sp_absx_len = len(sp_absx)
8631 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8632 vdb_len = len(vdb_path)
8634 sp_vdb = vdb_path.split("/")
8635 sp_vdb_len = len(sp_vdb)
8637 if not os.path.exists(absx+"/CONTENTS"):
8638 print "!!! Not a valid db dir: "+str(absx)
8641 if sp_absx_len <= sp_vdb_len:
8642 # The Path is shorter... so it can't be inside the vdb.
8645 print "\n!!!",x,"cannot be inside "+ \
8646 vdb_path+"; aborting.\n"
8649 for idx in range(0,sp_vdb_len):
8650 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
8653 print "\n!!!", x, "is not inside "+\
8654 vdb_path+"; aborting.\n"
8657 print "="+"/".join(sp_absx[sp_vdb_len:])
8658 candidate_catpkgs.append(
8659 "="+"/".join(sp_absx[sp_vdb_len:]))
8662 if (not "--quiet" in myopts):
8664 if settings["ROOT"] != "/":
8665 writemsg_level(darkgreen(newline+ \
8666 ">>> Using system located in ROOT tree %s\n" % \
8669 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
8670 not ("--quiet" in myopts):
8671 writemsg_level(darkgreen(newline+\
8672 ">>> These are the packages that would be unmerged:\n"))
8674 # Preservation of order is required for --depclean and --prune so
8675 # that dependencies are respected. Use all_selected to eliminate
8676 # duplicate packages since the same package may be selected by
8679 all_selected = set()
8680 for x in candidate_catpkgs:
8681 # cycle through all our candidate deps and determine
8682 # what will and will not get unmerged
8684 mymatch = vartree.dbapi.match(x)
8685 except portage.exception.AmbiguousPackageName, errpkgs:
8686 print "\n\n!!! The short ebuild name \"" + \
8687 x + "\" is ambiguous. Please specify"
8688 print "!!! one of the following fully-qualified " + \
8689 "ebuild names instead:\n"
8690 for i in errpkgs[0]:
8691 print " " + green(i)
8695 if not mymatch and x[0] not in "<>=~":
8696 mymatch = localtree.dep_match(x)
8698 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
8699 (x, unmerge_action), noiselevel=-1)
8703 {"protected": set(), "selected": set(), "omitted": set()})
8704 mykey = len(pkgmap) - 1
8705 if unmerge_action=="unmerge":
8707 if y not in all_selected:
8708 pkgmap[mykey]["selected"].add(y)
8710 elif unmerge_action == "prune":
8711 if len(mymatch) == 1:
8713 best_version = mymatch[0]
8714 best_slot = vartree.getslot(best_version)
8715 best_counter = vartree.dbapi.cpv_counter(best_version)
8716 for mypkg in mymatch[1:]:
8717 myslot = vartree.getslot(mypkg)
8718 mycounter = vartree.dbapi.cpv_counter(mypkg)
8719 if (myslot == best_slot and mycounter > best_counter) or \
8720 mypkg == portage.best([mypkg, best_version]):
8721 if myslot == best_slot:
8722 if mycounter < best_counter:
8723 # On slot collision, keep the one with the
8724 # highest counter since it is the most
8725 # recently installed.
8727 best_version = mypkg
8729 best_counter = mycounter
8730 pkgmap[mykey]["protected"].add(best_version)
8731 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
8732 if mypkg != best_version and mypkg not in all_selected)
8733 all_selected.update(pkgmap[mykey]["selected"])
8735 # unmerge_action == "clean"
8737 for mypkg in mymatch:
8738 if unmerge_action == "clean":
8739 myslot = localtree.getslot(mypkg)
8741 # since we're pruning, we don't care about slots
8742 # and put all the pkgs in together
8744 if myslot not in slotmap:
8745 slotmap[myslot] = {}
8746 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
8748 for mypkg in vartree.dbapi.cp_list(
8749 portage.dep_getkey(mymatch[0])):
8750 myslot = vartree.getslot(mypkg)
8751 if myslot not in slotmap:
8752 slotmap[myslot] = {}
8753 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
8755 for myslot in slotmap:
8756 counterkeys = slotmap[myslot].keys()
8760 pkgmap[mykey]["protected"].add(
8761 slotmap[myslot][counterkeys[-1]])
8764 for counter in counterkeys[:]:
8765 mypkg = slotmap[myslot][counter]
8766 if mypkg not in mymatch:
8767 counterkeys.remove(counter)
8768 pkgmap[mykey]["protected"].add(
8769 slotmap[myslot][counter])
8771 #be pretty and get them in order of merge:
8772 for ckey in counterkeys:
8773 mypkg = slotmap[myslot][ckey]
8774 if mypkg not in all_selected:
8775 pkgmap[mykey]["selected"].add(mypkg)
8776 all_selected.add(mypkg)
8777 # ok, now the last-merged package
8778 # is protected, and the rest are selected
8779 numselected = len(all_selected)
8780 if global_unmerge and not numselected:
8781 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
8785 portage.writemsg_stdout(
8786 "\n>>> No packages selected for removal by " + \
8787 unmerge_action + "\n")
8791 vartree.dbapi.flush_cache()
8792 portage.locks.unlockdir(vdb_lock)
8794 from portage.sets.base import EditablePackageSet
8796 # generate a list of package sets that are directly or indirectly listed in "world",
8797 # as there is no persistent list of "installed" sets
8798 installed_sets = ["world"]
8803 pos = len(installed_sets)
8804 for s in installed_sets[pos - 1:]:
8807 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
8810 installed_sets += candidates
8811 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
8814 # we don't want to unmerge packages that are still listed in user-editable package sets
8815 # listed in "world" as they would be remerged on the next update of "world" or the
8816 # relevant package sets.
8817 unknown_sets = set()
8818 for cp in xrange(len(pkgmap)):
8819 for cpv in pkgmap[cp]["selected"].copy():
8823 # It could have been uninstalled
8824 # by a concurrent process.
8827 if unmerge_action != "clean" and \
8828 root_config.root == "/" and \
8829 portage.match_from_list(
8830 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
8831 msg = ("Not unmerging package %s since there is no valid " + \
8832 "reason for portage to unmerge itself.") % (pkg.cpv,)
8833 for line in textwrap.wrap(msg, 75):
8835 # adjust pkgmap so the display output is correct
8836 pkgmap[cp]["selected"].remove(cpv)
8837 all_selected.remove(cpv)
8838 pkgmap[cp]["protected"].add(cpv)
8842 for s in installed_sets:
8843 # skip sets that the user requested to unmerge, and skip world
8844 # unless we're unmerging a package set (as the package would be
8845 # removed from "world" later on)
8846 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
8850 if s in unknown_sets:
8853 out = portage.output.EOutput()
8854 out.eerror(("Unknown set '@%s' in " + \
8855 "%svar/lib/portage/world_sets") % \
8856 (s, root_config.root))
8859 # only check instances of EditablePackageSet as other classes are generally used for
8860 # special purposes and can be ignored here (and are usually generated dynamically, so the
8861 # user can't do much about them anyway)
8862 if isinstance(sets[s], EditablePackageSet):
8864 # This is derived from a snippet of code in the
8865 # depgraph._iter_atoms_for_pkg() method.
8866 for atom in sets[s].iterAtomsForPackage(pkg):
8867 inst_matches = vartree.dbapi.match(atom)
8868 inst_matches.reverse() # descending order
8870 for inst_cpv in inst_matches:
8872 inst_pkg = _pkg(inst_cpv)
8874 # It could have been uninstalled
8875 # by a concurrent process.
8878 if inst_pkg.cp != atom.cp:
8881 # This is descending order, and we're not
8882 # interested in any versions <= pkg given.
8884 if pkg.slot_atom != inst_pkg.slot_atom:
8885 higher_slot = inst_pkg
8887 if higher_slot is None:
8891 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
8892 #print colorize("WARN", "but still listed in the following package sets:")
8893 #print " %s\n" % ", ".join(parents)
8894 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
8895 print colorize("WARN", "still referenced by the following package sets:")
8896 print " %s\n" % ", ".join(parents)
8897 # adjust pkgmap so the display output is correct
8898 pkgmap[cp]["selected"].remove(cpv)
8899 all_selected.remove(cpv)
8900 pkgmap[cp]["protected"].add(cpv)
8904 numselected = len(all_selected)
8907 "\n>>> No packages selected for removal by " + \
8908 unmerge_action + "\n")
8911 # Unmerge order only matters in some cases
8915 selected = d["selected"]
8918 cp = portage.cpv_getkey(iter(selected).next())
8919 cp_dict = unordered.get(cp)
8922 unordered[cp] = cp_dict
8925 for k, v in d.iteritems():
8926 cp_dict[k].update(v)
8927 pkgmap = [unordered[cp] for cp in sorted(unordered)]
8929 for x in xrange(len(pkgmap)):
8930 selected = pkgmap[x]["selected"]
8933 for mytype, mylist in pkgmap[x].iteritems():
8934 if mytype == "selected":
8936 mylist.difference_update(all_selected)
8937 cp = portage.cpv_getkey(iter(selected).next())
8938 for y in localtree.dep_match(cp):
8939 if y not in pkgmap[x]["omitted"] and \
8940 y not in pkgmap[x]["selected"] and \
8941 y not in pkgmap[x]["protected"] and \
8942 y not in all_selected:
8943 pkgmap[x]["omitted"].add(y)
8944 if global_unmerge and not pkgmap[x]["selected"]:
8945 #avoid cluttering the preview printout with stuff that isn't getting unmerged
8947 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
8948 writemsg_level(colorize("BAD","\a\n\n!!! " + \
8949 "'%s' is part of your system profile.\n" % cp),
8950 level=logging.WARNING, noiselevel=-1)
8951 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
8952 "be damaging to your system.\n\n"),
8953 level=logging.WARNING, noiselevel=-1)
8954 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
8955 countdown(int(settings["EMERGE_WARNING_DELAY"]),
8956 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
8958 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
8960 writemsg_level(bold(cp) + ": ", noiselevel=-1)
8961 for mytype in ["selected","protected","omitted"]:
8963 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
8964 if pkgmap[x][mytype]:
8965 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
8966 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
8967 for pn, ver, rev in sorted_pkgs:
8971 myversion = ver + "-" + rev
8972 if mytype == "selected":
8974 colorize("UNMERGE_WARN", myversion + " "),
8978 colorize("GOOD", myversion + " "), noiselevel=-1)
8980 writemsg_level("none ", noiselevel=-1)
8982 writemsg_level("\n", noiselevel=-1)
8984 writemsg_level("\n", noiselevel=-1)
8986 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
8987 " packages are slated for removal.\n")
8988 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
8989 " and " + colorize("GOOD", "'omitted'") + \
8990 " packages will not be removed.\n\n")
8992 if "--pretend" in myopts:
8993 #we're done... return
8995 if "--ask" in myopts:
8996 if userquery("Would you like to unmerge these packages?")=="No":
8997 # enter pretend mode for correct formatting of results
8998 myopts["--pretend"] = True
9003 #the real unmerging begins, after a short delay....
9004 if clean_delay and not autoclean:
9005 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
9007 for x in xrange(len(pkgmap)):
9008 for y in pkgmap[x]["selected"]:
9009 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
9010 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
9011 mysplit = y.split("/")
9013 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
9014 mysettings, unmerge_action not in ["clean","prune"],
9015 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
9016 scheduler=scheduler)
9018 if retval != os.EX_OK:
9019 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
9021 raise UninstallFailure(retval)
9024 if clean_world and hasattr(sets["world"], "cleanPackage"):
9025 sets["world"].cleanPackage(vartree.dbapi, y)
9026 emergelog(xterm_titles, " >>> unmerge success: "+y)
9027 if clean_world and hasattr(sets["world"], "remove"):
9028 for s in root_config.setconfig.active:
9029 sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""Regenerate the GNU info directory index for stale info directories.

	Each candidate directory's mtime is compared against prev_mtimes; only
	directories that changed since the previous session are reprocessed
	with /usr/bin/install-info.

	@param root: filesystem root that the infodirs entries are joined onto
	@param infodirs: candidate info directories (from INFOPATH/INFODIR)
	@param prev_mtimes: dir -> mtime mapping from the last run, updated in
		place after a directory has been processed
	@param retval: emerge's exit status (not used by the visible code)
	"""
	# Nothing to do unless install-info is installed.
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		inforoot=normpath(root+z)
		if os.path.isdir(inforoot):
			infomtime = long(os.stat(inforoot).st_mtime)
			# Stale when the dir is new to us or its mtime changed.
			if inforoot not in prev_mtimes or \
				prev_mtimes[inforoot] != infomtime:
				regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")
			# The "dir" index may exist plain or compressed.
			dir_extensions = ("", ".gz", ".bz2")
			for inforoot in regen_infodirs:
				# Skip directories that vanished or are not writable.
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				# Ignore hidden entries and subdirectories.
				if x.startswith(".") or \
					os.path.isdir(os.path.join(inforoot, x)):
				if x.startswith("dir"):
					# Never feed the index file itself (or backups of it)
					# to install-info.
					for ext in dir_extensions:
						if x == "dir" + ext or \
							x == "dir" + ext + ".old":
				if processed_count == 0:
					# Before processing the first file, set aside any
					# existing index so it is rebuilt from scratch.
					for ext in dir_extensions:
						os.rename(dir_file + ext, dir_file + ext + ".old")
						moved_old_dir = True
					except EnvironmentError, e:
						# A missing index file is expected here.
						if e.errno != errno.ENOENT:
				processed_count += 1
				myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
				existsstr="already exists, for file `"
				if re.search(existsstr,myso):
					# Already exists... Don't increment the count for this.
				elif myso[:44]=="install-info: warning: no info dir entry in ":
					# This info file doesn't contain a DIR-header: install-info produces this
					# (harmless) warning (the --quiet switch doesn't seem to work).
					# Don't increment the count for this.
					errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						os.rename(dir_file + ext + ".old", dir_file + ext)
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					os.unlink(dir_file + ext + ".old")
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

		out.eerror("Processed %d info files; %d errors." % \
			writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
		out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	"""Print a warning for each repository that has unread news items.

	@param root_config: RootConfig providing the target root and its trees
	@param myopts: emerge options; unread counters are only updated when
		--pretend is not in effect
	"""
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	NEWS_PATH = os.path.join("metadata", "news") if False else NEWS_PATH  # NOTE(review): placeholder removed
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Don't mark items as read during a dry run.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			# Only print the header once, before the first repo with news.
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	# Closing hint, shown only when at least one repo had unread items.
	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	"""Warn about preserved libraries that are still registered, listing
	each library's alternate paths, its consumers, and the packages that
	own those consumers.

	@param vardbapi: installed-package database providing plib_registry,
		linkmap and owner lookup
	"""
	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False
		except portage.exception.CommandNotFound, e:
			# Can't rebuild the linkmap (e.g. scanelf missing); degrade to
			# listing libs without consumer information.
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True
			search_for_owners = set()
			for cpv in plibdata:
				# Keys of this package's own preserved libs, used below to
				# exclude self-consumers.
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
					consumer_map[f] = consumers
					# Only the consumers we may display need owner lookups.
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# Group paths that refer to the same underlying object
			# (hardlinks/symlinks) so they print together.
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				# Show at most MAX_DISPLAY consumers; summarize the rest.
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					# Exactly one extra: show it rather than "1 other files".
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.
	@rtype: bool
	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
	from portage.elog import mod_echo
	pass # happens during downgrade to a version without the module
	# Whether anything is queued for display.
	messages_shown = bool(mod_echo._items)
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Info Files
	Update Config Files
	Update mtimes on info files
	Display preserved libs warnings

	@param root_config: RootConfig for the target root (its trees are the
		package databases used below)
	@param myopts: emerge options
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@type retval: Int
	@rtype: None
	@returns:
	1.  Calls sys.exit(retval)
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	# Skip the vdb-dependent work when nothing was merged: a matching
	# counter hash means the installed-package state is unchanged.
	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

	if "noinfo" not in settings.features:
		chk_updated_info_files(target_root,
			infodirs, info_mtimes, retval)

	portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	"""Scan CONFIG_PROTECT locations for pending ._cfg????_* updates
	(using the external `find` command) and tell the user how many
	config files need attention.

	@param target_root: root that the protected paths are joined onto
	@param config_protect: CONFIG_PROTECT entries, already split
	"""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
		mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
		if stat.S_ISDIR(mymode):
			# Protected directory: search the whole tree, pruning hidden dirs.
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			# Protected single file: look only for updates of that name.
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		# Exclude editor backups; NUL-separate for safe splitting below.
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
		# Show the error message alone, sending stdout to /dev/null.
		os.system(mycommand + " 1>/dev/null")
		files = a[1].split('\0')
		# split always produces an empty string as the last element
		if files and not files[-1]:
		print "\n"+colorize("WARN", " * IMPORTANT:"),
		if stat.S_ISDIR(mymode):
			print "%d config files in '%s' need updating." % \
			print "config file '%s' needs updating." % x

	print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
		" section of the " + bold("emerge")
	print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: news directory path, relative to the repository root
	@param UNREAD_PATH: directory tracking which items are still unread
	@param repo_id: the repository to examine
	@rtype: Integer
	@returns:
	1.  The number of unread but relevant news items.
	"""
	# All of the real work is delegated to NewsManager.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Insert category + "/" immediately before the package-name part of
	atom, i.e. before its first word character (so version operators such
	as >= or ~ stay in front)."""
	alphanum = re.search(r'\w', atom)
	ret = atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
	# NOTE(review): no `return ret` (nor a guard for a failed re.search)
	# is visible here -- presumably intended; confirm against upstream.
def is_valid_package_atom(x):
	"""Return whether x is a valid package atom; a missing category is
	tolerated by temporarily prepending "cat/" before validation."""
	alphanum = re.search(r'\w', x)
	# NOTE(review): a guard on `alphanum` (and on operator prefixes like
	# >=/~) is presumably intended before this dereference -- confirm.
	x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	"""Point the user at the handbook section about blocked packages."""
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
9441 def show_mask_docs():
9442 print "For more information, see the MASKED PACKAGES section in the emerge"
9443 print "man page or refer to the Gentoo Handbook."
9445 def action_sync(settings, trees, mtimedb, myopts, myaction):
9446 xterm_titles = "notitles" not in settings.features
9447 emergelog(xterm_titles, " === sync")
9448 portdb = trees[settings["ROOT"]]["porttree"].dbapi
9449 myportdir = portdb.porttree_root
9450 out = portage.output.EOutput()
9452 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
9454 if myportdir[-1]=="/":
9455 myportdir=myportdir[:-1]
9457 st = os.stat(myportdir)
9461 print ">>>",myportdir,"not found, creating it."
9462 os.makedirs(myportdir,0755)
9463 st = os.stat(myportdir)
9466 spawn_kwargs["env"] = settings.environ()
9467 if 'usersync' in settings.features and \
9468 portage.data.secpass >= 2 and \
9469 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
9470 st.st_gid != os.getgid() and st.st_mode & 0070):
9472 homedir = pwd.getpwuid(st.st_uid).pw_dir
9476 # Drop privileges when syncing, in order to match
9477 # existing uid/gid settings.
9478 spawn_kwargs["uid"] = st.st_uid
9479 spawn_kwargs["gid"] = st.st_gid
9480 spawn_kwargs["groups"] = [st.st_gid]
9481 spawn_kwargs["env"]["HOME"] = homedir
9483 if not st.st_mode & 0020:
9484 umask = umask | 0020
9485 spawn_kwargs["umask"] = umask
9487 syncuri = settings.get("SYNC", "").strip()
9489 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
9490 noiselevel=-1, level=logging.ERROR)
9493 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
9494 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
9498 updatecache_flg = False
9499 if myaction == "metadata":
9500 print "skipping sync"
9501 updatecache_flg = True
9502 elif ".git" in vcs_dirs:
9503 # Update existing git repository, and ignore the syncuri. We are
9504 # going to trust the user and assume that the user is in the branch
9505 # that he/she wants updated. We'll let the user manage branches with
9507 if portage.process.find_binary("git") is None:
9508 msg = ["Command not found: git",
9509 "Type \"emerge dev-util/git\" to enable git support."]
9511 writemsg_level("!!! %s\n" % l,
9512 level=logging.ERROR, noiselevel=-1)
9514 msg = ">>> Starting git pull in %s..." % myportdir
9515 emergelog(xterm_titles, msg )
9516 writemsg_level(msg + "\n")
9517 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
9518 (portage._shell_quote(myportdir),), **spawn_kwargs)
9519 if exitcode != os.EX_OK:
9520 msg = "!!! git pull error in %s." % myportdir
9521 emergelog(xterm_titles, msg)
9522 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
9524 msg = ">>> Git pull in %s successful" % myportdir
9525 emergelog(xterm_titles, msg)
9526 writemsg_level(msg + "\n")
9527 exitcode = git_sync_timestamps(settings, myportdir)
9528 if exitcode == os.EX_OK:
9529 updatecache_flg = True
9530 elif syncuri[:8]=="rsync://":
9531 for vcs_dir in vcs_dirs:
9532 writemsg_level(("!!! %s appears to be under revision " + \
9533 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
9534 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
9536 if not os.path.exists("/usr/bin/rsync"):
9537 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
9538 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
9543 if settings["PORTAGE_RSYNC_OPTS"] == "":
9544 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
9546 "--recursive", # Recurse directories
9547 "--links", # Consider symlinks
9548 "--safe-links", # Ignore links outside of tree
9549 "--perms", # Preserve permissions
9550 "--times", # Preserive mod times
9551 "--compress", # Compress the data transmitted
9552 "--force", # Force deletion on non-empty dirs
9553 "--whole-file", # Don't do block transfers, only entire files
9554 "--delete", # Delete files that aren't in the master tree
9555 "--stats", # Show final statistics about what was transfered
9556 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
9557 "--exclude=/distfiles", # Exclude distfiles from consideration
9558 "--exclude=/local", # Exclude local from consideration
9559 "--exclude=/packages", # Exclude packages from consideration
9563 # The below validation is not needed when using the above hardcoded
9566 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
9568 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
9569 for opt in ("--recursive", "--times"):
9570 if opt not in rsync_opts:
9571 portage.writemsg(yellow("WARNING:") + " adding required option " + \
9572 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9573 rsync_opts.append(opt)
9575 for exclude in ("distfiles", "local", "packages"):
9576 opt = "--exclude=/%s" % exclude
9577 if opt not in rsync_opts:
9578 portage.writemsg(yellow("WARNING:") + \
9579 " adding required option %s not included in " % opt + \
9580 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
9581 rsync_opts.append(opt)
9583 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
9584 def rsync_opt_startswith(opt_prefix):
9585 for x in rsync_opts:
9586 if x.startswith(opt_prefix):
9590 if not rsync_opt_startswith("--timeout="):
9591 rsync_opts.append("--timeout=%d" % mytimeout)
9593 for opt in ("--compress", "--whole-file"):
9594 if opt not in rsync_opts:
9595 portage.writemsg(yellow("WARNING:") + " adding required option " + \
9596 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9597 rsync_opts.append(opt)
9599 if "--quiet" in myopts:
9600 rsync_opts.append("--quiet") # Shut up a lot
9602 rsync_opts.append("--verbose") # Print filelist
9604 if "--verbose" in myopts:
9605 rsync_opts.append("--progress") # Progress meter for each file
9607 if "--debug" in myopts:
9608 rsync_opts.append("--checksum") # Force checksum on all files
9610 # Real local timestamp file.
9611 servertimestampfile = os.path.join(
9612 myportdir, "metadata", "timestamp.chk")
9614 content = portage.util.grabfile(servertimestampfile)
9618 mytimestamp = time.mktime(time.strptime(content[0],
9619 "%a, %d %b %Y %H:%M:%S +0000"))
9620 except (OverflowError, ValueError):
9625 rsync_initial_timeout = \
9626 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
9628 rsync_initial_timeout = 15
9631 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
9632 except SystemExit, e:
9633 raise # Needed else can't exit
9635 maxretries=3 #default number of retries
9638 user_name, hostname, port = re.split(
9639 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
9642 if user_name is None:
9644 updatecache_flg=True
9645 all_rsync_opts = set(rsync_opts)
9646 extra_rsync_opts = shlex.split(
9647 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
9648 all_rsync_opts.update(extra_rsync_opts)
9649 family = socket.AF_INET
9650 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
9651 family = socket.AF_INET
9652 elif socket.has_ipv6 and \
9653 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
9654 family = socket.AF_INET6
9656 SERVER_OUT_OF_DATE = -1
9657 EXCEEDED_MAX_RETRIES = -2
9663 for addrinfo in socket.getaddrinfo(
9664 hostname, None, family, socket.SOCK_STREAM):
9665 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
9666 # IPv6 addresses need to be enclosed in square brackets
9667 ips.append("[%s]" % addrinfo[4][0])
9669 ips.append(addrinfo[4][0])
9670 from random import shuffle
9672 except SystemExit, e:
9673 raise # Needed else can't exit
9674 except Exception, e:
9675 print "Notice:",str(e)
9680 dosyncuri = syncuri.replace(
9681 "//" + user_name + hostname + port + "/",
9682 "//" + user_name + ips[0] + port + "/", 1)
9683 except SystemExit, e:
9684 raise # Needed else can't exit
9685 except Exception, e:
9686 print "Notice:",str(e)
9690 if "--ask" in myopts:
9691 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
9696 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
9697 if "--quiet" not in myopts:
9698 print ">>> Starting rsync with "+dosyncuri+"..."
9700 emergelog(xterm_titles,
9701 ">>> Starting retry %d of %d with %s" % \
9702 (retries,maxretries,dosyncuri))
9703 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
9705 if mytimestamp != 0 and "--quiet" not in myopts:
9706 print ">>> Checking server timestamp ..."
9708 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
9710 if "--debug" in myopts:
9715 # Even if there's no timestamp available locally, fetch the
9716 # timestamp anyway as an initial probe to verify that the server is
9717 # responsive. This protects us from hanging indefinitely on a
9718 # connection attempt to an unresponsive server which rsync's
9719 # --timeout option does not prevent.
9721 # Temporary file for remote server timestamp comparison.
9722 from tempfile import mkstemp
9723 fd, tmpservertimestampfile = mkstemp()
9725 mycommand = rsynccommand[:]
9726 mycommand.append(dosyncuri.rstrip("/") + \
9727 "/metadata/timestamp.chk")
9728 mycommand.append(tmpservertimestampfile)
9732 def timeout_handler(signum, frame):
9733 raise portage.exception.PortageException("timed out")
9734 signal.signal(signal.SIGALRM, timeout_handler)
9735 # Timeout here in case the server is unresponsive. The
9736 # --timeout rsync option doesn't apply to the initial
9737 # connection attempt.
9738 if rsync_initial_timeout:
9739 signal.alarm(rsync_initial_timeout)
9741 mypids.extend(portage.process.spawn(
9742 mycommand, env=settings.environ(), returnpid=True))
9743 exitcode = os.waitpid(mypids[0], 0)[1]
9744 content = portage.grabfile(tmpservertimestampfile)
9746 if rsync_initial_timeout:
9749 os.unlink(tmpservertimestampfile)
9752 except portage.exception.PortageException, e:
9756 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
9757 os.kill(mypids[0], signal.SIGTERM)
9758 os.waitpid(mypids[0], 0)
9759 # This is the same code rsync uses for timeout.
9762 if exitcode != os.EX_OK:
9764 exitcode = (exitcode & 0xff) << 8
9766 exitcode = exitcode >> 8
9768 portage.process.spawned_pids.remove(mypids[0])
9771 servertimestamp = time.mktime(time.strptime(
9772 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
9773 except (OverflowError, ValueError):
9775 del mycommand, mypids, content
9776 if exitcode == os.EX_OK:
9777 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
9778 emergelog(xterm_titles,
9779 ">>> Cancelling sync -- Already current.")
9782 print ">>> Timestamps on the server and in the local repository are the same."
9783 print ">>> Cancelling all further sync action. You are already up to date."
9785 print ">>> In order to force sync, remove '%s'." % servertimestampfile
9789 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
9790 emergelog(xterm_titles,
9791 ">>> Server out of date: %s" % dosyncuri)
9794 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
9796 print ">>> In order to force sync, remove '%s'." % servertimestampfile
9799 exitcode = SERVER_OUT_OF_DATE
9800 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
9802 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
9803 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
9804 if exitcode in [0,1,3,4,11,14,20,21]:
9806 elif exitcode in [1,3,4,11,14,20,21]:
9809 # Code 2 indicates protocol incompatibility, which is expected
9810 # for servers with protocol < 29 that don't support
9811 # --prune-empty-directories. Retry for a server that supports
9812 # at least rsync protocol version 29 (>=rsync-2.6.4).
9817 if retries<=maxretries:
9818 print ">>> Retrying..."
9823 updatecache_flg=False
9824 exitcode = EXCEEDED_MAX_RETRIES
9828 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
9829 elif exitcode == SERVER_OUT_OF_DATE:
9831 elif exitcode == EXCEEDED_MAX_RETRIES:
9833 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
9838 msg.append("Rsync has reported that there is a syntax error. Please ensure")
9839 msg.append("that your SYNC statement is proper.")
9840 msg.append("SYNC=" + settings["SYNC"])
9842 msg.append("Rsync has reported that there is a File IO error. Normally")
9843 msg.append("this means your disk is full, but can be caused by corruption")
9844 msg.append("on the filesystem that contains PORTDIR. Please investigate")
9845 msg.append("and try again after the problem has been fixed.")
9846 msg.append("PORTDIR=" + settings["PORTDIR"])
9848 msg.append("Rsync was killed before it finished.")
9850 msg.append("Rsync has not successfully finished. It is recommended that you keep")
9851 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
9852 msg.append("to use rsync due to firewall or other restrictions. This should be a")
9853 msg.append("temporary problem unless complications exist with your network")
9854 msg.append("(and possibly your system's filesystem) configuration.")
9858 elif syncuri[:6]=="cvs://":
9859 if not os.path.exists("/usr/bin/cvs"):
9860 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
9861 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
9864 cvsdir=os.path.dirname(myportdir)
9865 if not os.path.exists(myportdir+"/CVS"):
9867 print ">>> Starting initial cvs checkout with "+syncuri+"..."
9868 if os.path.exists(cvsdir+"/gentoo-x86"):
9869 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
9874 if e.errno != errno.ENOENT:
9876 "!!! existing '%s' directory; exiting.\n" % myportdir)
9879 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
9880 print "!!! cvs checkout error; exiting."
9882 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
9885 print ">>> Starting cvs update with "+syncuri+"..."
9886 retval = portage.process.spawn_bash(
9887 "cd %s; cvs -z0 -q update -dP" % \
9888 (portage._shell_quote(myportdir),), **spawn_kwargs)
9889 if retval != os.EX_OK:
9893 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
9894 noiselevel=-1, level=logging.ERROR)
9897 if updatecache_flg and \
9898 myaction != "metadata" and \
9899 "metadata-transfer" not in settings.features:
9900 updatecache_flg = False
9902 # Reload the whole config from scratch.
9903 settings, trees, mtimedb = load_emerge_config(trees=trees)
9904 root_config = trees[settings["ROOT"]]["root_config"]
9905 portdb = trees[settings["ROOT"]]["porttree"].dbapi
9907 if updatecache_flg and \
9908 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
9910 # Only update cache for myportdir since that's
9911 # the only one that's been synced here.
9912 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
9914 if portage._global_updates(trees, mtimedb["updates"]):
9916 # Reload the whole config from scratch.
9917 settings, trees, mtimedb = load_emerge_config(trees=trees)
9918 portdb = trees[settings["ROOT"]]["porttree"].dbapi
9919 root_config = trees[settings["ROOT"]]["root_config"]
9921 mybestpv = portdb.xmatch("bestmatch-visible",
9922 portage.const.PORTAGE_PACKAGE_ATOM)
9923 mypvs = portage.best(
9924 trees[settings["ROOT"]]["vartree"].dbapi.match(
9925 portage.const.PORTAGE_PACKAGE_ATOM))
9927 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
9929 if myaction != "metadata":
9930 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
9931 retval = portage.process.spawn(
9932 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
9933 dosyncuri], env=settings.environ())
9934 if retval != os.EX_OK:
9935 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
9937 if(mybestpv != mypvs) and not "--quiet" in myopts:
9939 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
9940 print red(" * ")+"that you update portage now, before any other packages are updated."
9942 print red(" * ")+"To update portage, run 'emerge portage' now."
9945 display_news_notification(root_config, myopts)
# Reconcile file mtimes in a git-based portage tree with the mtimes recorded
# in the metadata cache.  Git checkouts do not preserve timestamps, so without
# this the cache would look stale for every entry.  Python 2 syntax throughout
# (`except X, e`, `long`, `iteritems`).
9948 def git_sync_timestamps(settings, portdir):
9950 Since git doesn't preserve timestamps, synchronize timestamps between
9951 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
9952 for a given file as long as the file in the working tree is not modified
# Nothing to do unless the tree ships a pregenerated metadata/cache directory.
9955 cache_dir = os.path.join(portdir, "metadata", "cache")
9956 if not os.path.isdir(cache_dir):
9958 writemsg_level(">>> Synchronizing timestamps...\n")
9960 from portage.cache.cache_errors import CacheError
# Open the pregenerated cache read-only via the configured metadb module.
9962 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
9963 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
9964 except CacheError, e:
9965 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
9966 level=logging.ERROR, noiselevel=-1)
# Known eclass names, derived from *.eclass filenames (".eclass" is 7 chars).
9969 ec_dir = os.path.join(portdir, "eclass")
9971 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
9972 if f.endswith(".eclass"))
9974 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
9975 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified (diff-filter=M against
# HEAD); cached timestamps cannot be trusted for those paths.
9978 args = [portage.const.BASH_BINARY, "-c",
9979 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
9980 portage._shell_quote(portdir)]
9982 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
9983 modified_files = set(l.rstrip("\n") for l in proc.stdout)
# NOTE(review): `rval` is presumably proc.wait() — the assignment is not
# visible here; confirm against the full source.
9985 if rval != os.EX_OK:
9988 modified_eclasses = set(ec for ec in ec_names \
9989 if os.path.join("eclass", ec + ".eclass") in modified_files)
# ec -> mtime already applied in this run; used to detect conflicting
# mtimes for the same eclass across cache entries.
9991 updated_ec_mtimes = {}
9993 for cpv in cache_db:
9994 cpv_split = portage.catpkgsplit(cpv)
9995 if cpv_split is None:
9996 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
9997 level=logging.ERROR, noiselevel=-1)
10000 cat, pn, ver, rev = cpv_split
10001 cat, pf = portage.catsplit(cpv)
10002 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip entries whose ebuild was modified in the working tree.
10003 if relative_eb_path in modified_files:
10007 cache_entry = cache_db[cpv]
10008 eb_mtime = cache_entry.get("_mtime_")
10009 ec_mtimes = cache_entry.get("_eclasses_")
10011 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
10012 level=logging.ERROR, noiselevel=-1)
10014 except CacheError, e:
10015 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
10016 (cpv, e), level=logging.ERROR, noiselevel=-1)
10019 if eb_mtime is None:
10020 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
10021 level=logging.ERROR, noiselevel=-1)
10025 eb_mtime = long(eb_mtime)
10027 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
10028 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
10031 if ec_mtimes is None:
10032 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
10033 level=logging.ERROR, noiselevel=-1)
# Skip entries that inherit any locally modified eclass.
10036 if modified_eclasses.intersection(ec_mtimes):
10039 missing_eclasses = set(ec_mtimes).difference(ec_names)
10040 if missing_eclasses:
10041 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
10042 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
10046 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat result, not an mtime; presumably
# the original takes .st_mtime (cf. line 10073) — confirm against full source.
10048 current_eb_mtime = os.stat(eb_path)
10050 writemsg_level("!!! Missing ebuild: %s\n" % \
10051 (cpv,), level=logging.ERROR, noiselevel=-1)
# Consistency pass: the same eclass must carry the same cached mtime in
# every entry we touch, otherwise applying either would corrupt the other.
10054 inconsistent = False
10055 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10056 updated_mtime = updated_ec_mtimes.get(ec)
10057 if updated_mtime is not None and updated_mtime != ec_mtime:
10058 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
10059 (cpv, ec), level=logging.ERROR, noiselevel=-1)
10060 inconsistent = True
# Apply the cached mtimes to the working-tree ebuild and its eclasses.
10066 if current_eb_mtime != eb_mtime:
10067 os.utime(eb_path, (eb_mtime, eb_mtime))
10069 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10070 if ec in updated_ec_mtimes:
10072 ec_path = os.path.join(ec_dir, ec + ".eclass")
10073 current_mtime = long(os.stat(ec_path).st_mtime)
10074 if current_mtime != ec_mtime:
10075 os.utime(ec_path, (ec_mtime, ec_mtime))
10076 updated_ec_mtimes[ec] = ec_mtime
# `emerge --metadata`: transfer each tree's pregenerated cache
# (metadata/cache) into the local depcache, validating EAPI and eclass data
# per entry, then prune stale destination entries.  Shows a terminal progress
# bar when stdout is a tty.
10080 def action_metadata(settings, portdb, myopts, porttrees=None):
10081 if porttrees is None:
10082 porttrees = portdb.porttrees
10083 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Ensure group-writability of cache files; restored at the end (line 10306).
10084 old_umask = os.umask(0002)
10085 cachedir = os.path.normpath(settings.depcachedir)
# Guard against a catastrophically misconfigured PORTAGE_DEPCACHEDIR.
10086 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
10087 "/lib", "/opt", "/proc", "/root", "/sbin",
10088 "/sys", "/tmp", "/usr", "/var"]:
10089 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
10090 "ROOT DIRECTORY ON YOUR SYSTEM."
10091 print >> sys.stderr, \
10092 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
10094 if not os.path.exists(cachedir):
10095 os.makedirs(cachedir)
# Drop placeholder keys from the set of metadata fields to transfer.
10097 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
10098 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle: source cache, destination cache, eclass db, and the set
# of cpvs seen this run (used below to prune dead destination entries).
10100 class TreeData(object):
10101 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
10102 def __init__(self, dest_db, eclass_db, path, src_db):
10103 self.dest_db = dest_db
10104 self.eclass_db = eclass_db
10106 self.src_db = src_db
10107 self.valid_nodes = set()
# Build TreeData only for trees that actually have a source cache.
10109 porttrees_data = []
10110 for path in porttrees:
10111 src_db = portdb._pregen_auxdb.get(path)
10112 if src_db is None and \
10113 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
10114 src_db = portdb.metadbmodule(
10115 path, 'metadata/cache', auxdbkeys, readonly=True)
# Some cache modules may not accept an `ec` attribute; best-effort only.
10117 src_db.ec = portdb._repo_info[path].eclass_db
10118 except AttributeError:
10121 if src_db is not None:
10122 porttrees_data.append(TreeData(portdb.auxdb[path],
10123 portdb._repo_info[path].eclass_db, path, src_db))
10125 porttrees = [tree_data.path for tree_data in porttrees_data]
# Progress bar only for interactive, non-quiet runs.
10127 isatty = sys.stdout.isatty()
10128 quiet = not isatty or '--quiet' in myopts
10131 progressBar = portage.output.TermProgressBar()
10132 progressHandler = ProgressHandler()
10133 onProgress = progressHandler.onProgress
10135 progressBar.set(progressHandler.curval, progressHandler.maxval)
10136 progressHandler.display = display
# Keep the bar sized correctly when the terminal is resized.
10137 def sigwinch_handler(signum, frame):
10138 lines, progressBar.term_columns = \
10139 portage.output.get_term_size()
10140 signal.signal(signal.SIGWINCH, sigwinch_handler)
10142 # Temporarily override portdb.porttrees so portdb.cp_all()
10143 # will only return the relevant subset.
10144 portdb_porttrees = portdb.porttrees
10145 portdb.porttrees = porttrees
10147 cp_all = portdb.cp_all()
10149 portdb.porttrees = portdb_porttrees
10152 maxval = len(cp_all)
10153 if onProgress is not None:
10154 onProgress(maxval, curval)
10156 from portage.cache.util import quiet_mirroring
10157 from portage import eapi_is_supported, \
10158 _validate_cache_for_unsupported_eapis
10160 # TODO: Display error messages, but do not interfere with the progress bar.
10162 # 1) erase the progress bar
10163 # 2) show the error message
10164 # 3) redraw the progress bar on a new line
10165 noise = quiet_mirroring()
# Main transfer loop: per category-package, per tree, per version.
10168 for tree_data in porttrees_data:
10169 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
10170 tree_data.valid_nodes.add(cpv)
10172 src = tree_data.src_db[cpv]
10173 except KeyError, e:
10174 noise.missing_entry(cpv)
10177 except CacheError, ce:
10178 noise.exception(cpv, ce)
# A leading '-' on EAPI marks a previously-detected unsupported EAPI.
10182 eapi = src.get('EAPI')
10185 eapi = eapi.lstrip('-')
10186 eapi_supported = eapi_is_supported(eapi)
10187 if not eapi_supported:
10188 if not _validate_cache_for_unsupported_eapis:
10189 noise.misc(cpv, "unable to validate " + \
10190 "cache for EAPI='%s'" % eapi)
10195 dest = tree_data.dest_db[cpv]
10196 except (KeyError, CacheError):
10199 for d in (src, dest):
10200 if d is not None and d.get('EAPI') in ('', '0'):
# Fast validity check on the existing destination entry: same mtime,
# valid eclass data, and the same eclass set.
10203 if dest is not None:
10204 if not (dest['_mtime_'] == src['_mtime_'] and \
10205 tree_data.eclass_db.is_eclass_data_valid(
10206 dest['_eclasses_']) and \
10207 set(dest['_eclasses_']) == set(src['_eclasses_'])):
10210 # We don't want to skip the write unless we're really
10211 # sure that the existing cache is identical, so don't
10212 # trust _mtime_ and _eclasses_ alone.
10213 for k in set(chain(src, dest)).difference(
10214 ('_mtime_', '_eclasses_')):
10215 if dest.get(k, '') != src.get(k, ''):
10219 if dest is not None:
10220 # The existing data is valid and identical,
10221 # so there's no need to overwrite it.
10225 inherited = src.get('INHERITED', '')
10226 eclasses = src.get('_eclasses_')
10227 except CacheError, ce:
10228 noise.exception(cpv, ce)
# Validate eclass data on the source side before trusting it.
10232 if eclasses is not None:
10233 if not tree_data.eclass_db.is_eclass_data_valid(
10234 src['_eclasses_']):
10235 noise.eclass_stale(cpv)
10237 inherited = eclasses
10239 inherited = inherited.split()
10241 if tree_data.src_db.complete_eclass_entries and \
10243 noise.corruption(cpv, "missing _eclasses_ field")
10247 # Even if _eclasses_ already exists, replace it with data from
10248 # eclass_cache, in order to insert local eclass paths.
10250 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
10252 # INHERITED contains a non-existent eclass.
10253 noise.eclass_stale(cpv)
10256 if eclasses is None:
10257 noise.eclass_stale(cpv)
10259 src['_eclasses_'] = eclasses
10261 src['_eclasses_'] = {}
# Unsupported EAPI: store a minimal negative-cache entry ('-' prefix).
10263 if not eapi_supported:
10265 'EAPI' : '-' + eapi,
10266 '_mtime_' : src['_mtime_'],
10267 '_eclasses_' : src['_eclasses_'],
10271 tree_data.dest_db[cpv] = src
10272 except CacheError, ce:
10273 noise.exception(cpv, ce)
10277 if onProgress is not None:
10278 onProgress(maxval, curval)
10280 if onProgress is not None:
10281 onProgress(maxval, curval)
# Prune destination entries that no longer correspond to any visible cpv.
10283 for tree_data in porttrees_data:
10285 dead_nodes = set(tree_data.dest_db.iterkeys())
10286 except CacheError, e:
10287 writemsg_level("Error listing cache entries for " + \
10288 "'%s': %s, continuing...\n" % (tree_data.path, e),
10289 level=logging.ERROR, noiselevel=-1)
10292 dead_nodes.difference_update(tree_data.valid_nodes)
10293 for cpv in dead_nodes:
10295 del tree_data.dest_db[cpv]
10296 except (KeyError, CacheError):
10300 # make sure the final progress is displayed
10301 progressHandler.display()
# Restore default SIGWINCH handling and the caller's umask.
10303 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
10306 os.umask(old_umask)
# `emerge --regen`: rebuild metadata cache entries from the ebuilds
# themselves, parallelized via MetadataRegen.  Returns the regen exit code.
10308 def action_regen(settings, portdb, max_jobs, max_load):
10309 xterm_titles = "notitles" not in settings.features
10310 emergelog(xterm_titles, " === regen")
10311 #regenerate cache entries
10312 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block on it.
10314 os.close(sys.stdin.fileno())
10315 except SystemExit, e:
10316 raise # Needed else can't exit
10321 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
10324 portage.writemsg_stdout("done!\n")
10325 return regen.returncode
# `emerge --config <atom>`: run the pkg_config phase of exactly one installed
# package, with interactive disambiguation under --ask when the atom matches
# several installed versions.
10327 def action_config(settings, trees, myopts, myfiles):
10328 if len(myfiles) != 1:
10329 print red("!!! config can only take a single package atom at this time\n")
10331 if not is_valid_package_atom(myfiles[0]):
10332 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
10334 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
10335 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match against installed packages (vartree) only.
10339 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
10340 except portage.exception.AmbiguousPackageName, e:
10341 # Multiple matches thrown from cpv_expand
10344 print "No packages found.\n"
10346 elif len(pkgs) > 1:
# Several installed versions match: let the user pick one (--ask),
# otherwise just list them and bail.
10347 if "--ask" in myopts:
10349 print "Please select a package to configure:"
10353 options.append(str(idx))
10354 print options[-1]+") "+pkg
10356 options.append("X")
10357 idx = userquery("Selection?", options)
10360 pkg = pkgs[int(idx)-1]
10362 print "The following packages available:"
10365 print "\nPlease use a specific atom or the --ask option."
10371 if "--ask" in myopts:
10372 if userquery("Ready to configure "+pkg+"?") == "No":
10375 print "Configuring pkg..."
10377 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
10378 mysettings = portage.config(clone=settings)
10379 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
10380 debug = mysettings.get("PORTAGE_DEBUG") == "1"
10381 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): `== 1` compares a string setting to an int, so this is
# always False; line 10380 uses `== "1"` — likely the intended test. Confirm.
10383 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
10384 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the clean phase to tidy the build directory.
10385 if retval == os.EX_OK:
10386 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
10387 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# `emerge --info`: print system settings (versions of toolchain packages,
# key make.conf variables, USE flags), and — when package atoms are given —
# per-package build-time settings plus an optional pkg_info phase run.
10390 def action_info(settings, trees, myopts, myfiles):
10391 print getportageversion(settings["PORTDIR"], settings["ROOT"],
10392 settings.profile_path, settings["CHOST"],
10393 trees[settings["ROOT"]]["vartree"].dbapi)
# Banner: "System Settings" centered between '=' rules.
10395 header_title = "System Settings"
10397 print header_width * "="
10398 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10399 print header_width * "="
10400 print "System uname: "+platform.platform(aliased=1)
10402 lastSync = portage.grabfile(os.path.join(
10403 settings["PORTDIR"], "metadata", "timestamp.chk"))
10404 print "Timestamp of tree:",
# Report distcc/ccache versions and whether each FEATURE is enabled.
10410 output=commands.getstatusoutput("distcc --version")
10412 print str(output[1].split("\n",1)[0]),
10413 if "distcc" in settings.features:
10418 output=commands.getstatusoutput("ccache -V")
10420 print str(output[1].split("\n",1)[0]),
10421 if "ccache" in settings.features:
# Installed versions of key toolchain packages, plus any extras the profile
# lists in profiles/info_pkgs.
10426 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
10427 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
10428 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
10429 myvars = portage.util.unique_array(myvars)
10433 if portage.isvalidatom(x):
10434 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
10435 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
10436 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
10438 for pn, ver, rev in pkg_matches:
10440 pkgs.append(ver + "-" + rev)
10444 pkgs = ", ".join(pkgs)
10445 print "%-20s %s" % (x+":", pkgs)
10447 print "%-20s %s" % (x+":", "[NOT VALID]")
10449 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variables to display: everything under --verbose, otherwise a fixed list
# plus any extras from profiles/info_vars.
10451 if "--verbose" in myopts:
10452 myvars=settings.keys()
10454 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
10455 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
10456 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
10457 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
10459 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
10461 myvars = portage.util.unique_array(myvars)
10462 use_expand = settings.get('USE_EXPAND', '').split()
10464 use_expand_hidden = set(
10465 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
10466 alphabetical_use = '--alphabetical' in myopts
10467 root_config = trees[settings["ROOT"]]['root_config']
10473 print '%s="%s"' % (x, settings[x])
# USE is printed specially: USE_EXPAND-prefixed flags are stripped out of
# the plain USE display and shown under their own variable names.
10475 use = set(settings["USE"].split())
10476 for varname in use_expand:
10477 flag_prefix = varname.lower() + "_"
10478 for f in list(use):
10479 if f.startswith(flag_prefix):
10483 print 'USE="%s"' % " ".join(use),
10484 for varname in use_expand:
10485 myval = settings.get(varname)
10487 print '%s="%s"' % (varname, myval),
10490 unset_vars.append(x)
10492 print "Unset: "+", ".join(unset_vars)
# --debug: dump cvs_id_string of every portage submodule that has one.
10495 if "--debug" in myopts:
10496 for x in dir(portage):
10497 module = getattr(portage, x)
10498 if "cvs_id_string" in dir(module):
10499 print "%s: %s" % (str(x), str(module.cvs_id_string))
10501 # See if we can find any packages installed matching the strings
10502 # passed on the command line
10504 vardb = trees[settings["ROOT"]]["vartree"].dbapi
10505 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10507 mypkgs.extend(vardb.match(x))
10509 # If some packages were found...
10511 # Get our global settings (we only print stuff if it varies from
10512 # the current config)
10513 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
10514 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
10515 auxkeys.append('DEFINED_PHASES')
10517 pkgsettings = portage.config(clone=settings)
10519 # Loop through each package
10520 # Only print settings if they differ from global settings
10521 header_title = "Package Settings"
10522 print header_width * "="
10523 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10524 print header_width * "="
10525 from portage.output import EOutput
10528 # Get all package specific variables
10529 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
10530 pkg = Package(built=True, cpv=cpv,
10531 installed=True, metadata=izip(Package.metadata_keys,
10532 (metadata.get(x, '') for x in Package.metadata_keys)),
10533 root_config=root_config, type_name='installed')
10535 print "\n%s was built with the following:" % \
10536 colorize("INFORM", str(pkg.cpv))
# Partition the package's flags into enabled/disabled, per USE_EXPAND
# variable, noting which flags are forced/masked by the profile.
10538 pkgsettings.setcpv(pkg)
10539 forced_flags = set(chain(pkgsettings.useforce,
10540 pkgsettings.usemask))
10541 use = set(pkg.use.enabled)
10542 use.discard(pkgsettings.get('ARCH'))
10543 use_expand_flags = set()
10546 for varname in use_expand:
10547 flag_prefix = varname.lower() + "_"
10549 if f.startswith(flag_prefix):
10550 use_expand_flags.add(f)
10551 use_enabled.setdefault(
10552 varname.upper(), []).append(f[len(flag_prefix):])
10554 for f in pkg.iuse.all:
10555 if f.startswith(flag_prefix):
10556 use_expand_flags.add(f)
10558 use_disabled.setdefault(
10559 varname.upper(), []).append(f[len(flag_prefix):])
# Display order: USE first, then the expand variables alphabetically.
10561 var_order = set(use_enabled)
10562 var_order.update(use_disabled)
10563 var_order = sorted(var_order)
10564 var_order.insert(0, 'USE')
10565 use.difference_update(use_expand_flags)
10566 use_enabled['USE'] = list(use)
10567 use_disabled['USE'] = []
10569 for f in pkg.iuse.all:
10570 if f not in use and \
10571 f not in use_expand_flags:
10572 use_disabled['USE'].append(f)
10574 for varname in var_order:
10575 if varname in use_expand_hidden:
10578 for f in use_enabled.get(varname, []):
10579 flags.append(UseFlagDisplay(f, True, f in forced_flags))
10580 for f in use_disabled.get(varname, []):
10581 flags.append(UseFlagDisplay(f, False, f in forced_flags))
10582 if alphabetical_use:
10583 flags.sort(key=UseFlagDisplay.sort_combined)
10585 flags.sort(key=UseFlagDisplay.sort_separated)
10586 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
# Print build-time variables that differ from the current config.
10589 for myvar in mydesiredvars:
10590 if metadata[myvar].split() != settings.get(myvar, '').split():
10591 print "%s=\"%s\"" % (myvar, metadata[myvar])
# Run the ebuild's pkg_info phase, if it defines one.
10594 if metadata['DEFINED_PHASES']:
10595 if 'info' not in metadata['DEFINED_PHASES'].split():
10598 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
10599 ebuildpath = vardb.findname(pkg.cpv)
10600 if not ebuildpath or not os.path.exists(ebuildpath):
10601 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
10603 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
# NOTE(review): `== 1` compares a string setting to an int (always False);
# elsewhere this file tests `== "1"` — confirm intent against full source.
10604 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
10605 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# `emerge --search`: run each search term through a `search` instance and
# print the accumulated results.  Regex errors in a term are reported
# per-term rather than aborting the whole run.
10608 def action_search(root_config, myopts, myfiles, spinner):
10610 print "emerge: no search terms provided."
10612 searchinstance = search(root_config,
10613 spinner, "--searchdesc" in myopts,
10614 "--quiet" not in myopts, "--usepkg" in myopts,
10615 "--usepkgonly" in myopts)
10616 for mysearch in myfiles:
10618 searchinstance.execute(mysearch)
10619 except re.error, comment:
10620 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
10622 searchinstance.output()
# Shared front-end for the removal actions (unmerge/clean/prune/depclean/
# deselect): validates the command-line arguments into atoms — expanding
# short names via the installed-package db and resolving absolute file paths
# to their owning packages — then dispatches to the appropriate handler.
10624 def action_uninstall(settings, trees, ldpath_mtimes,
10625 opts, action, files, spinner):
10627 # For backward compat, some actions do not require leading '='.
10628 ignore_missing_eq = action in ('clean', 'unmerge')
10629 root = settings['ROOT']
10630 vardb = trees[root]['vartree'].dbapi
10634 # Ensure atoms are valid before calling unmerge().
10635 # For backward compat, leading '=' is not required.
10637 if is_valid_package_atom(x) or \
10638 (ignore_missing_eq and is_valid_package_atom('=' + x)):
10641 valid_atoms.append(
10642 portage.dep_expand(x, mydb=vardb, settings=settings))
# Short name matched multiple categories: list the candidates and fail.
10643 except portage.exception.AmbiguousPackageName, e:
10644 msg = "The short ebuild name \"" + x + \
10645 "\" is ambiguous. Please specify " + \
10646 "one of the following " + \
10647 "fully-qualified ebuild names instead:"
10648 for line in textwrap.wrap(msg, 70):
10649 writemsg_level("!!! %s\n" % (line,),
10650 level=logging.ERROR, noiselevel=-1)
10652 writemsg_level(" %s\n" % colorize("INFORM", i),
10653 level=logging.ERROR, noiselevel=-1)
10654 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths are resolved to their owning packages below.
10657 elif x.startswith(os.sep):
10658 if not x.startswith(root):
10659 writemsg_level(("!!! '%s' does not start with" + \
10660 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
10662 # Queue these up since it's most efficient to handle
10663 # multiple files in a single iter_owners() call.
10664 lookup_owners.append(x)
10668 msg.append("'%s' is not a valid package atom." % (x,))
10669 msg.append("Please check ebuild(5) for full details.")
10670 writemsg_level("".join("!!! %s\n" % line for line in msg),
10671 level=logging.ERROR, noiselevel=-1)
# Owner lookup: find which installed packages own the queued paths.
10675 relative_paths = []
10676 search_for_multiple = False
10677 if len(lookup_owners) > 1:
10678 search_for_multiple = True
10680 for x in lookup_owners:
10681 if not search_for_multiple and os.path.isdir(x):
10682 search_for_multiple = True
10683 relative_paths.append(x[len(root):])
10686 for pkg, relative_path in \
10687 vardb._owners.iter_owners(relative_paths):
10688 owners.add(pkg.mycpv)
10689 if not search_for_multiple:
# Build slot-qualified atoms for the owners where a SLOT is recorded.
10694 slot = vardb.aux_get(cpv, ['SLOT'])[0]
10696 # portage now masks packages with missing slot, but it's
10697 # possible that one was installed by an older version
10698 atom = portage.cpv_getkey(cpv)
10700 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
10701 valid_atoms.append(portage.dep.Atom(atom))
10703 writemsg_level(("!!! '%s' is not claimed " + \
10704 "by any package.\n") % lookup_owners[0],
10705 level=logging.WARNING, noiselevel=-1)
10707 if files and not valid_atoms:
# Dispatch: unmerge/clean (and --nodeps prune) go straight to unmerge();
# deselect and the remaining depclean/prune cases go through the resolver.
10710 if action in ('clean', 'unmerge') or \
10711 (action == 'prune' and "--nodeps" in opts):
10712 # When given a list of atoms, unmerge them in the order given.
10713 ordered = action == 'unmerge'
10714 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
10715 valid_atoms, ldpath_mtimes, ordered=ordered)
10717 elif action == 'deselect':
10718 rval = action_deselect(settings, trees, opts, valid_atoms)
10720 rval = action_depclean(settings, trees, ldpath_mtimes,
10721 opts, action, valid_atoms, spinner)
# `emerge --deselect`: remove matching atoms from the world favorites set.
# Honors --pretend (no write) and --ask (interactive confirmation).
10725 def action_deselect(settings, trees, opts, atoms):
10726 root_config = trees[settings['ROOT']]['root_config']
10727 world_set = root_config.sets['world']
# The world set implementation must support mutation.
10728 if not hasattr(world_set, 'update'):
10729 writemsg_level("World set does not appear to be mutable.\n",
10730 level=logging.ERROR, noiselevel=-1)
10733 vardb = root_config.trees['vartree'].dbapi
# Expand each argument atom with slot-qualified variants derived from the
# installed packages it matches, so slotted world entries are caught too.
10734 expanded_atoms = set(atoms)
10735 from portage.dep import Atom
10737 for cpv in vardb.match(atom):
10738 slot, = vardb.aux_get(cpv, ['SLOT'])
10741 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
10743 pretend = '--pretend' in opts
# Lock the world file while modifying it, when the set supports locking.
10745 if not pretend and hasattr(world_set, 'lock'):
10749 discard_atoms = set()
10751 for atom in world_set:
10752 if not isinstance(atom, Atom):
# Keep a slotless world atom when the argument is slot-specific: the
# world entry is broader than what the user asked to deselect.
10755 for arg_atom in expanded_atoms:
10756 if arg_atom.intersects(atom) and \
10757 not (arg_atom.slot and not atom.slot):
10758 discard_atoms.add(atom)
10761 for atom in sorted(discard_atoms):
10762 print ">>> Removing %s from \"world\" favorites file..." % \
10763 colorize("INFORM", str(atom))
10765 if '--ask' in opts:
10766 prompt = "Would you like to remove these " + \
10767 "packages from your world favorites?"
10768 if userquery(prompt) == 'No':
# Rewrite world as the old contents minus the discarded atoms.
10771 remaining = set(world_set)
10772 remaining.difference_update(discard_atoms)
10774 world_set.replace(remaining)
10776 print ">>> No matching atoms found in \"world\" favorites file..."
10782 def action_depclean(settings, trees, ldpath_mtimes,
10783 myopts, action, myfiles, spinner):
10784 # Kill packages that aren't explicitly merged or are required as a
10785 # dependency of another package. World file is explicit.
10787 # Global depclean or prune operations are not very safe when there are
10788 # missing dependencies since it's unknown how badly incomplete
10789 # the dependency graph is, and we might accidentally remove packages
10790 # that should have been pulled into the graph. On the other hand, it's
10791 # relatively safe to ignore missing deps when only asked to remove
10792 # specific packages.
10793 allow_missing_deps = len(myfiles) > 0
10796 msg.append("Always study the list of packages to be cleaned for any obvious\n")
10797 msg.append("mistakes. Packages that are part of the world set will always\n")
10798 msg.append("be kept. They can be manually added to this set with\n")
10799 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
10800 msg.append("package.provided (see portage(5)) will be removed by\n")
10801 msg.append("depclean, even if they are part of the world set.\n")
10803 msg.append("As a safety measure, depclean will not remove any packages\n")
10804 msg.append("unless *all* required dependencies have been resolved. As a\n")
10805 msg.append("consequence, it is often necessary to run %s\n" % \
10806 good("`emerge --update"))
10807 msg.append(good("--newuse --deep @system @world`") + \
10808 " prior to depclean.\n")
10810 if action == "depclean" and "--quiet" not in myopts and not myfiles:
10811 portage.writemsg_stdout("\n")
10813 portage.writemsg_stdout(colorize("WARN", " * ") + x)
10815 xterm_titles = "notitles" not in settings.features
10816 myroot = settings["ROOT"]
10817 root_config = trees[myroot]["root_config"]
10818 getSetAtoms = root_config.setconfig.getSetAtoms
10819 vardb = trees[myroot]["vartree"].dbapi
10820 deselect = myopts.get('--deselect') != 'n'
10822 required_set_names = ("system", "world")
10826 for s in required_set_names:
10827 required_sets[s] = InternalPackageSet(
10828 initial_atoms=getSetAtoms(s))
10831 # When removing packages, use a temporary version of world
10832 # which excludes packages that are intended to be eligible for
10834 world_temp_set = required_sets["world"]
10835 system_set = required_sets["system"]
10837 if not system_set or not world_temp_set:
10840 writemsg_level("!!! You have no system list.\n",
10841 level=logging.ERROR, noiselevel=-1)
10843 if not world_temp_set:
10844 writemsg_level("!!! You have no world file.\n",
10845 level=logging.WARNING, noiselevel=-1)
10847 writemsg_level("!!! Proceeding is likely to " + \
10848 "break your installation.\n",
10849 level=logging.WARNING, noiselevel=-1)
10850 if "--pretend" not in myopts:
10851 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
10853 if action == "depclean":
10854 emergelog(xterm_titles, " >>> depclean")
10857 args_set = InternalPackageSet()
10859 args_set.update(myfiles)
10860 matched_packages = False
10863 matched_packages = True
10865 if not matched_packages:
10866 writemsg_level(">>> No packages selected for removal by %s\n" % \
10870 writemsg_level("\nCalculating dependencies ")
10871 resolver_params = create_depgraph_params(myopts, "remove")
10872 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
10873 vardb = resolver.trees[myroot]["vartree"].dbapi
10875 if action == "depclean":
10880 world_temp_set.clear()
10882 # Pull in everything that's installed but not matched
10883 # by an argument atom since we don't want to clean any
10884 # package if something depends on it.
10889 if args_set.findAtomForPackage(pkg) is None:
10890 world_temp_set.add("=" + pkg.cpv)
10892 except portage.exception.InvalidDependString, e:
10893 show_invalid_depstring_notice(pkg,
10894 pkg.metadata["PROVIDE"], str(e))
10896 world_temp_set.add("=" + pkg.cpv)
10899 elif action == "prune":
10902 world_temp_set.clear()
10904 # Pull in everything that's installed since we don't
10905 # to prune a package if something depends on it.
10906 world_temp_set.update(vardb.cp_all())
10910 # Try to prune everything that's slotted.
10911 for cp in vardb.cp_all():
10912 if len(vardb.cp_list(cp)) > 1:
10915 # Remove atoms from world that match installed packages
10916 # that are also matched by argument atoms, but do not remove
10917 # them if they match the highest installed version.
10920 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
10921 if not pkgs_for_cp or pkg not in pkgs_for_cp:
10922 raise AssertionError("package expected in matches: " + \
10923 "cp = %s, cpv = %s matches = %s" % \
10924 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
10926 highest_version = pkgs_for_cp[-1]
10927 if pkg == highest_version:
10928 # pkg is the highest version
10929 world_temp_set.add("=" + pkg.cpv)
10932 if len(pkgs_for_cp) <= 1:
10933 raise AssertionError("more packages expected: " + \
10934 "cp = %s, cpv = %s matches = %s" % \
10935 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
10938 if args_set.findAtomForPackage(pkg) is None:
10939 world_temp_set.add("=" + pkg.cpv)
10941 except portage.exception.InvalidDependString, e:
10942 show_invalid_depstring_notice(pkg,
10943 pkg.metadata["PROVIDE"], str(e))
10945 world_temp_set.add("=" + pkg.cpv)
10949 for s, package_set in required_sets.iteritems():
10950 set_atom = SETPREFIX + s
10951 set_arg = SetArg(arg=set_atom, set=package_set,
10952 root_config=resolver.roots[myroot])
10953 set_args[s] = set_arg
10954 for atom in set_arg.set:
10955 resolver._dep_stack.append(
10956 Dependency(atom=atom, root=myroot, parent=set_arg))
10957 resolver.digraph.add(set_arg, None)
10959 success = resolver._complete_graph()
10960 writemsg_level("\b\b... done!\n")
10962 resolver.display_problems()
# Nested helper: report dependencies that could not be satisfied during graph
# completion.  Several lines (including its return statements) are elided
# from this listing.
10967 def unresolved_deps():
10969 unresolvable = set()
# Only hard (stronger than SOFT) deps with a real Package parent count as
# blocking; soft deps are tolerated during unmerge.
10970 for dep in resolver._initially_unsatisfied_deps:
10971 if isinstance(dep.parent, Package) and \
10972 (dep.priority > UnmergeDepPriority.SOFT):
10973 unresolvable.add((dep.atom, dep.parent.cpv))
10975 if not unresolvable:
# allow_missing_deps (e.g. --nodeps) suppresses this failure path.
10978 if unresolvable and not allow_missing_deps:
10979 prefix = bad(" * ")
10981 msg.append("Dependencies could not be completely resolved due to")
10982 msg.append("the following required packages not being installed:")
10984 for atom, parent in unresolvable:
10985 msg.append(" %s pulled in by:" % (atom,))
10986 msg.append(" %s" % (parent,))
10988 msg.append("Have you forgotten to run " + \
10989 good("`emerge --update --newuse --deep @system @world`") + " prior")
10990 msg.append(("to %s? It may be necessary to manually " + \
10991 "uninstall packages that no longer") % action)
10992 msg.append("exist in the portage tree since " + \
10993 "it may not be possible to satisfy their")
10994 msg.append("dependencies. Also, be aware of " + \
10995 "the --with-bdeps option that is documented")
10996 msg.append("in " + good("`man emerge`") + ".")
10997 if action == "prune":
10999 msg.append("If you would like to ignore " + \
11000 "dependencies then use %s." % good("--nodeps"))
11001 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
11002 level=logging.ERROR, noiselevel=-1)
# Abort path when unresolved deps were found (elided body), then snapshot the
# resolved digraph and count how many real Package nodes it retains.
11006 if unresolved_deps():
11009 graph = resolver.digraph.copy()
11010 required_pkgs_total = 0
# NOTE(review): the `for node in graph:` header for this loop is elided.
11012 if isinstance(node, Package):
11013 required_pkgs_total += 1
# Nested helper: for --verbose, print which graph nodes pulled in a package
# that was therefore kept.  Some lines (e.g. the early return and the
# parent_strs initialization/sort) are elided from this listing.
11015 def show_parents(child_node):
11016 parent_nodes = graph.parent_nodes(child_node)
11017 if not parent_nodes:
11018 # With --prune, the highest version can be pulled in without any
11019 # real parent since all installed packages are pulled in. In that
11020 # case there's nothing to show here.
# Non-Package nodes (e.g. SetArg) have no cpv, so fall back to str(node).
11023 for node in parent_nodes:
11024 parent_strs.append(str(getattr(node, "cpv", node)))
11027 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
11028 for parent_str in parent_strs:
11029 msg.append(" %s\n" % (parent_str,))
11031 portage.writemsg_stdout("".join(msg), noiselevel=-1)
def cmp_pkg_cpv(pkg1, pkg2):
    """Three-way comparator sorting Package instances by their cpv.

    Returns 1 when pkg1 sorts after pkg2, 0 when the cpv values are
    equal, and -1 otherwise -- the classic cmp() contract expected by
    cmp_sort_key(), which is how this function is consumed (see the
    sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)) call sites).

    The branch bodies were truncated in the source listing; this
    restores the standard return values for each branch.
    """
    if pkg1.cpv > pkg2.cpv:
        return 1
    elif pkg1.cpv == pkg2.cpv:
        return 0
    else:
        return -1
# Nested helper: collect installed packages that are not reachable in the
# dependency graph and are therefore safe to remove.  Several lines (the
# try/except framing, continue statements, writemsg calls) are elided.
11042 def create_cleanlist():
11043 pkgs_to_remove = []
11045 if action == "depclean":
# Depclean with no arguments: any installed package absent from the graph
# is removable.  The zero-argument branch header is elided above 11048.
11048 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11051 arg_atom = args_set.findAtomForPackage(pkg)
11052 except portage.exception.InvalidDependString:
11053 # this error has already been displayed by now
11057 if pkg not in graph:
11058 pkgs_to_remove.append(pkg)
11059 elif "--verbose" in myopts:
11063 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11064 if pkg not in graph:
11065 pkgs_to_remove.append(pkg)
11066 elif "--verbose" in myopts:
11069 elif action == "prune":
11070 # Prune really uses all installed instead of world. It's not
11071 # a real reverse dependency so don't display it as such.
11072 graph.remove(set_args["world"])
# Prune only considers packages matched by the argument atoms.
11074 for atom in args_set:
11075 for pkg in vardb.match_pkgs(atom):
11076 if pkg not in graph:
11077 pkgs_to_remove.append(pkg)
11078 elif "--verbose" in myopts:
# Informational output when nothing was selected; the writemsg_stdout call
# headers for these message strings are elided.
11081 if not pkgs_to_remove:
11083 ">>> No packages selected for removal by %s\n" % action)
11084 if "--verbose" not in myopts:
11086 ">>> To see reverse dependencies, use %s\n" % \
11088 if action == "prune":
11090 ">>> To ignore dependencies, use %s\n" % \
11093 return pkgs_to_remove
# --- preserve-libs pass: find libraries owned by removal candidates that
# still have consumers outside the removal set ---
11095 cleanlist = create_cleanlist()
11098 clean_set = set(cleanlist)
# Typo kept verbatim below is in the original comment block; meaning: check
# whether any of these packages are the sole providers of libraries.
11100 # Check if any of these package are the sole providers of libraries
11101 # with consumers that have not been selected for removal. If so, these
11102 # packages and any dependencies need to be added to the graph.
11103 real_vardb = trees[myroot]["vartree"].dbapi
11104 linkmap = real_vardb.linkmap
11105 liblist = linkmap.listLibraryObjects()
# Memoize linkmap queries: consumer_cache keyed by library path,
# provider_cache keyed (apparently) by consumer path -- see note at 11155.
11106 consumer_cache = {}
11107 provider_cache = {}
11111 writemsg_level(">>> Checking for lib consumers...\n")
11113 for pkg in cleanlist:
11114 pkg_dblink = real_vardb._dblink(pkg.cpv)
11115 provided_libs = set()
# Which of the known library objects does this removal candidate own?
11117 for lib in liblist:
11118 if pkg_dblink.isowner(lib, myroot):
11119 provided_libs.add(lib)
11121 if not provided_libs:
11125 for lib in provided_libs:
11126 lib_consumers = consumer_cache.get(lib)
11127 if lib_consumers is None:
11128 lib_consumers = linkmap.findConsumers(lib)
11129 consumer_cache[lib] = lib_consumers
11131 consumers[lib] = lib_consumers
# Drop consumers owned by the package itself (self-consumption is fine).
11136 for lib, lib_consumers in consumers.items():
11137 for consumer_file in list(lib_consumers):
11138 if pkg_dblink.isowner(consumer_file, myroot):
11139 lib_consumers.remove(consumer_file)
11140 if not lib_consumers:
11146 for lib, lib_consumers in consumers.iteritems():
11148 soname = soname_cache.get(lib)
11150 soname = linkmap.getSoname(lib)
11151 soname_cache[lib] = soname
11153 consumer_providers = []
11154 for lib_consumer in lib_consumers:
# NOTE(review): cache key mismatch -- the lookup uses `lib` but the store
# at 11158 uses `lib_consumer`.  As written the get() can return providers
# computed for an unrelated key; both lines should presumably use
# `lib_consumer`.  Confirm against upstream before changing.
11155 providers = provider_cache.get(lib)
11156 if providers is None:
11157 providers = linkmap.findProviders(lib_consumer)
11158 provider_cache[lib_consumer] = providers
11159 if soname not in providers:
11160 # Why does this happen?
11162 consumer_providers.append(
11163 (lib_consumer, providers[soname]))
11165 consumers[lib] = consumer_providers
11167 consumer_map[pkg] = consumers
# Collect every file path involved so ownership can be resolved in one batch.
11171 search_files = set()
11172 for consumers in consumer_map.itervalues():
11173 for lib, consumer_providers in consumers.iteritems():
11174 for lib_consumer, providers in consumer_providers:
11175 search_files.add(lib_consumer)
11176 search_files.update(providers)
# --- map files back to owning packages and keep library providers that
# still have external consumers ---
11178 writemsg_level(">>> Assigning files to packages...\n")
11179 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
11181 for pkg, consumers in consumer_map.items():
11182 for lib, consumer_providers in consumers.items():
11183 lib_consumers = set()
11185 for lib_consumer, providers in consumer_providers:
11186 owner_set = file_owners.get(lib_consumer)
11187 provider_dblinks = set()
11188 provider_pkgs = set()
# Only interesting when the soname has alternative providers.
11190 if len(providers) > 1:
11191 for provider in providers:
11192 provider_set = file_owners.get(provider)
11193 if provider_set is not None:
11194 provider_dblinks.update(provider_set)
# If another provider package is NOT being removed, this consumer is safe.
11196 if len(provider_dblinks) > 1:
11197 for provider_dblink in provider_dblinks:
11198 pkg_key = ("installed", myroot,
11199 provider_dblink.mycpv, "nomerge")
11200 if pkg_key not in clean_set:
11201 provider_pkgs.add(vardb.get(pkg_key))
11206 if owner_set is not None:
11207 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't count as breakage.
11209 for consumer_dblink in list(lib_consumers):
11210 if ("installed", myroot, consumer_dblink.mycpv,
11211 "nomerge") in clean_set:
11212 lib_consumers.remove(consumer_dblink)
11216 consumers[lib] = lib_consumers
11220 del consumer_map[pkg]
# Warn that some packages will be kept to avoid breaking linkage.
11223 # TODO: Implement a package set for rebuilding consumer packages.
11225 msg = "In order to avoid breakage of link level " + \
11226 "dependencies, one or more packages will not be removed. " + \
11227 "This can be solved by rebuilding " + \
11228 "the packages that pulled them in."
11230 prefix = bad(" * ")
11231 from textwrap import wrap
11232 writemsg_level("".join(prefix + "%s\n" % line for \
11233 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
11236 for pkg, consumers in consumer_map.iteritems():
11237 unique_consumers = set(chain(*consumers.values()))
11238 unique_consumers = sorted(consumer.mycpv \
11239 for consumer in unique_consumers)
11241 msg.append(" %s pulled in by:" % (pkg.cpv,))
11242 for consumer in unique_consumers:
11243 msg.append(" %s" % (consumer,))
11245 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
11246 level=logging.WARNING, noiselevel=-1)
11248 # Add lib providers to the graph as children of lib consumers,
11249 # and also add any dependencies pulled in by the provider.
11250 writemsg_level(">>> Adding lib providers to graph...\n")
11252 for pkg, consumers in consumer_map.iteritems():
11253 for consumer_dblink in set(chain(*consumers.values())):
11254 consumer_pkg = vardb.get(("installed", myroot,
11255 consumer_dblink.mycpv, "nomerge"))
# Inject a runtime-priority edge so the provider is kept; the failure
# branch body after this call is elided.
11256 if not resolver._add_pkg(pkg,
11257 Dependency(parent=consumer_pkg,
11258 priority=UnmergeDepPriority(runtime=True),
11260 resolver.display_problems()
# --- re-resolve after adding providers, then topologically order the
# removal list so dependents are unmerged before their dependencies ---
11263 writemsg_level("\nCalculating dependencies ")
11264 success = resolver._complete_graph()
11265 writemsg_level("\b\b... done!\n")
11266 resolver.display_problems()
11269 if unresolved_deps():
11272 graph = resolver.digraph.copy()
11273 required_pkgs_total = 0
# NOTE(review): the `for node in graph:` header for this count is elided.
11275 if isinstance(node, Package):
11276 required_pkgs_total += 1
11277 cleanlist = create_cleanlist()
11280 clean_set = set(cleanlist)
11282 # Use a topological sort to create an unmerge order such that
11283 # each package is unmerged before its dependencies. This is
11284 # necessary to avoid breaking things that may need to run
11285 # during pkg_prerm or pkg_postrm phases.
11287 # Create a new graph to account for dependencies between the
11288 # packages being unmerged.
11292 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
11293 runtime = UnmergeDepPriority(runtime=True)
11294 runtime_post = UnmergeDepPriority(runtime_post=True)
11295 buildtime = UnmergeDepPriority(buildtime=True)
# These are the entries of a priority_map dict; its opening line is elided.
11297 "RDEPEND": runtime,
11298 "PDEPEND": runtime_post,
11299 "DEPEND": buildtime,
11302 for node in clean_set:
11303 graph.add(node, None)
11305 node_use = node.metadata["USE"].split()
11306 for dep_type in dep_keys:
11307 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking; restored right after the call.
11311 portage.dep._dep_check_strict = False
11312 success, atoms = portage.dep_check(depstr, None, settings,
11313 myuse=node_use, trees=resolver._graph_trees,
11316 portage.dep._dep_check_strict = True
11318 # Ignore invalid deps of packages that will
11319 # be uninstalled anyway.
11322 priority = priority_map[dep_type]
# The `for atom in atoms:` header for this loop is elided.
11324 if not isinstance(atom, portage.dep.Atom):
11325 # Ignore invalid atoms returned from dep_check().
11329 matches = vardb.match_pkgs(atom)
# Add edges only between packages that are both being removed.
11332 for child_node in matches:
11333 if child_node in clean_set:
11334 graph.add(child_node, node, priority=priority)
11337 if len(graph.order) == len(graph.root_nodes()):
11338 # If there are no dependencies between packages
11339 # let unmerge() group them by cat/pn.
11341 cleanlist = [pkg.cpv for pkg in graph.order]
11343 # Order nodes from lowest to highest overall reference count for
11344 # optimal root node selection.
11345 node_refcounts = {}
11346 for node in graph.order:
11347 node_refcounts[node] = len(graph.parent_nodes(node))
11348 def cmp_reference_count(node1, node2):
11349 return node_refcounts[node1] - node_refcounts[node2]
11350 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Progressively ignore weaker priorities so circular deps can be broken.
11352 ignore_priority_range = [None]
11353 ignore_priority_range.extend(
11354 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
11355 while not graph.empty():
11356 for ignore_priority in ignore_priority_range:
11357 nodes = graph.root_nodes(ignore_priority=ignore_priority)
11361 raise AssertionError("no root nodes")
11362 if ignore_priority is not None:
11363 # Some deps have been dropped due to circular dependencies,
11364 # so only pop one node in order to minimize the number that
# The node-popping lines between the comment and the append are elided.
11369 cleanlist.append(node.cpv)
11371 unmerge(root_config, myopts, "unmerge", cleanlist,
11372 ldpath_mtimes, ordered=ordered)
# --- final statistics (Python 2 print statements) ---
11374 if action == "prune":
11377 if not cleanlist and "--quiet" in myopts:
11380 print "Packages installed: "+str(len(vardb.cpv_all()))
11381 print "Packages in world: " + \
11382 str(len(root_config.sets["world"].getAtoms()))
11383 print "Packages in system: " + \
11384 str(len(root_config.sets["system"].getAtoms()))
11385 print "Required packages: "+str(required_pkgs_total)
# With --pretend nothing was actually removed, hence the different wording.
11386 if "--pretend" in myopts:
11387 print "Number to remove: "+str(len(cleanlist))
11389 print "Number removed: "+str(len(cleanlist))
# Build a depgraph from mtimedb["resume"], iteratively dropping unsatisfiable
# entries when skip_unsatisfied is set.  Several lines (the retry loop header,
# docstring quotes, continue/raise statements) are elided from this listing.
11391 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
11393 Construct a depgraph for the given resume list. This will raise
11394 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
11396 @returns: (success, depgraph, dropped_tasks)
11399 skip_unsatisfied = True
11400 mergelist = mtimedb["resume"]["mergelist"]
11401 dropped_tasks = set()
11403 mydepgraph = depgraph(settings, trees,
11404 myopts, myparams, spinner)
11406 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
11407 skip_masked=skip_masked)
# Python 2 except syntax.  When allowed, prune the unsatisfied parents and
# everything that transitively becomes unsatisfied, then retry.
11408 except depgraph.UnsatisfiedResumeDep, e:
11409 if not skip_unsatisfied:
11412 graph = mydepgraph.digraph
11413 unsatisfied_parents = dict((dep.parent, dep.parent) \
11414 for dep in e.value)
11415 traversed_nodes = set()
11416 unsatisfied_stack = list(unsatisfied_parents)
11417 while unsatisfied_stack:
11418 pkg = unsatisfied_stack.pop()
11419 if pkg in traversed_nodes:
11421 traversed_nodes.add(pkg)
11423 # If this package was pulled in by a parent
11424 # package scheduled for merge, removing this
11425 # package may cause the parent package's
11426 # dependency to become unsatisfied.
11427 for parent_node in graph.parent_nodes(pkg):
11428 if not isinstance(parent_node, Package) \
11429 or parent_node.operation not in ("merge", "nomerge"):
# The `unsatisfied = ` assignment head for this call is elided.
11432 graph.child_nodes(parent_node,
11433 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
11434 if pkg in unsatisfied:
11435 unsatisfied_parents[parent_node] = parent_node
11436 unsatisfied_stack.append(parent_node)
# Keep only mergelist entries whose task tuple is still satisfiable.
11438 pruned_mergelist = []
11439 for x in mergelist:
11440 if isinstance(x, list) and \
11441 tuple(x) not in unsatisfied_parents:
11442 pruned_mergelist.append(x)
11444 # If the mergelist doesn't shrink then this loop is infinite.
11445 if len(pruned_mergelist) == len(mergelist):
11446 # This happens if a package can't be dropped because
11447 # it's already installed, but it has unsatisfied PDEPEND.
11449 mergelist[:] = pruned_mergelist
11451 # Exclude installed packages that have been removed from the graph due
11452 # to failure to build/install runtime dependencies after the dependent
11453 # package has already been installed.
11454 dropped_tasks.update(pkg for pkg in \
11455 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs() severs cycles so the dropped objects can be collected.
11456 mydepgraph.break_refs(unsatisfied_parents)
11458 del e, graph, traversed_nodes, \
11459 unsatisfied_parents, unsatisfied_stack
11463 return (success, mydepgraph, dropped_tasks)
# Top-level handler for merge-type actions (--resume included): validates the
# resume state, builds or resumes a depgraph, interacts with the user, and
# finally runs the Scheduler.  Many intermediate lines (del statements,
# continue/return, sys.exit calls, else branches) are elided from this
# listing, so control flow must be confirmed against the full source.
11465 def action_build(settings, trees, mtimedb,
11466 myopts, myaction, myfiles, spinner):
# --- sanity-check resume data stored in mtimedb; stale or malformed
# entries are dropped (the deletion lines are elided) ---
11468 # validate the state of the resume data
11469 # so that we can make assumptions later.
11470 for k in ("resume", "resume_backup"):
11471 if k not in mtimedb:
11473 resume_data = mtimedb[k]
11474 if not isinstance(resume_data, dict):
11477 mergelist = resume_data.get("mergelist")
11478 if not isinstance(mergelist, list):
# Each mergelist entry must be a 4-item [type, root, cpv, action] list.
11481 for x in mergelist:
11482 if not (isinstance(x, list) and len(x) == 4):
11484 pkg_type, pkg_root, pkg_key, pkg_action = x
11485 if pkg_root not in trees:
11486 # Current $ROOT setting differs,
11487 # so the list must be stale.
11493 resume_opts = resume_data.get("myopts")
11494 if not isinstance(resume_opts, (dict, list)):
11497 favorites = resume_data.get("favorites")
11498 if not isinstance(favorites, list):
# --- merge saved resume options into the current option set ---
11503 if "--resume" in myopts and \
11504 ("resume" in mtimedb or
11505 "resume_backup" in mtimedb):
11507 if "resume" not in mtimedb:
11508 mtimedb["resume"] = mtimedb["resume_backup"]
11509 del mtimedb["resume_backup"]
11511 # "myopts" is a list for backward compatibility.
11512 resume_opts = mtimedb["resume"].get("myopts", [])
11513 if isinstance(resume_opts, list):
11514 resume_opts = dict((k,True) for k in resume_opts)
# Interactive/one-shot options must not be inherited by the resumed run.
11515 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
11516 resume_opts.pop(opt, None)
11518 # Current options always override resume_opts.
11519 resume_opts.update(myopts)
11521 myopts.update(resume_opts)
11523 if "--debug" in myopts:
11524 writemsg_level("myopts %s\n" % (myopts,))
11526 # Adjust config according to options of the command being resumed.
11527 for myroot in trees:
11528 mysettings = trees[myroot]["vartree"].settings
11529 mysettings.unlock()
11530 adjust_config(myopts, mysettings)
11532 del myroot, mysettings
11534 ldpath_mtimes = mtimedb["ldpath"]
# --- cache commonly tested option flags ---
11537 buildpkgonly = "--buildpkgonly" in myopts
11538 pretend = "--pretend" in myopts
11539 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
11540 ask = "--ask" in myopts
11541 nodeps = "--nodeps" in myopts
11542 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
11543 tree = "--tree" in myopts
11544 if nodeps and tree:
11546 del myopts["--tree"]
11547 portage.writemsg(colorize("WARN", " * ") + \
11548 "--tree is broken with --nodeps. Disabling...\n")
11549 debug = "--debug" in myopts
11550 verbose = "--verbose" in myopts
11551 quiet = "--quiet" in myopts
11552 if pretend or fetchonly:
11553 # make the mtimedb readonly
11554 mtimedb.filename = None
# Warn that FEATURES=digest / --digest can mask Manifest corruption.
11555 if '--digest' in myopts or 'digest' in settings.features:
11556 if '--digest' in myopts:
11557 msg = "The --digest option"
11559 msg = "The FEATURES=digest setting"
11561 msg += " can prevent corruption from being" + \
11562 " noticed. The `repoman manifest` command is the preferred" + \
11563 " way to generate manifests and it is capable of doing an" + \
11564 " entire repository or category at once."
11565 prefix = bad(" * ")
11566 writemsg(prefix + "\n")
11567 from textwrap import wrap
11568 for line in wrap(msg, 72):
11569 writemsg("%s%s\n" % (prefix, line))
11570 writemsg(prefix + "\n")
# --- verbose preamble describing what would be done (py2 prints) ---
11572 if "--quiet" not in myopts and \
11573 ("--pretend" in myopts or "--ask" in myopts or \
11574 "--tree" in myopts or "--verbose" in myopts):
11576 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11578 elif "--buildpkgonly" in myopts:
11582 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
11584 print darkgreen("These are the packages that would be %s, in reverse order:") % action
11588 print darkgreen("These are the packages that would be %s, in order:") % action
11591 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
11592 if not show_spinner:
11593 spinner.update = spinner.update_quiet
# --- resume branch: rebuild the depgraph from the saved mergelist ---
11596 favorites = mtimedb["resume"].get("favorites")
11597 if not isinstance(favorites, list):
11601 print "Calculating dependencies ",
11602 myparams = create_depgraph_params(myopts, myaction)
11604 resume_data = mtimedb["resume"]
11605 mergelist = resume_data["mergelist"]
# --skipfirst drops the first "merge" entry (the actual deletion is elided).
11606 if mergelist and "--skipfirst" in myopts:
11607 for i, task in enumerate(mergelist):
11608 if isinstance(task, list) and \
11609 task and task[-1] == "merge":
11616 success, mydepgraph, dropped_tasks = resume_depgraph(
11617 settings, trees, mtimedb, myopts, myparams, spinner)
11618 except (portage.exception.PackageNotFound,
11619 depgraph.UnsatisfiedResumeDep), e:
11620 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11621 mydepgraph = e.depgraph
# --- detailed error reporting for a failed resume ---
11624 from textwrap import wrap
11625 from portage.output import EOutput
11628 resume_data = mtimedb["resume"]
11629 mergelist = resume_data.get("mergelist")
11630 if not isinstance(mergelist, list):
11632 if mergelist and debug or (verbose and not quiet):
11633 out.eerror("Invalid resume list:")
11636 for task in mergelist:
11637 if isinstance(task, list):
11638 out.eerror(indent + str(tuple(task)))
11641 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11642 out.eerror("One or more packages are either masked or " + \
11643 "have missing dependencies:")
11646 for dep in e.value:
11647 if dep.atom is None:
11648 out.eerror(indent + "Masked package:")
11649 out.eerror(2 * indent + str(dep.parent))
11652 out.eerror(indent + str(dep.atom) + " pulled in by:")
11653 out.eerror(2 * indent + str(dep.parent))
11655 msg = "The resume list contains packages " + \
11656 "that are either masked or have " + \
11657 "unsatisfied dependencies. " + \
11658 "Please restart/continue " + \
11659 "the operation manually, or use --skipfirst " + \
11660 "to skip the first package in the list and " + \
11661 "any other packages that may be " + \
11662 "masked or have missing dependencies."
11663 for line in wrap(msg, 72):
11665 elif isinstance(e, portage.exception.PackageNotFound):
11666 out.eerror("An expected package is " + \
11667 "not available: %s" % str(e))
11669 msg = "The resume list contains one or more " + \
11670 "packages that are no longer " + \
11671 "available. Please restart/continue " + \
11672 "the operation manually."
11673 for line in wrap(msg, 72):
11677 print "\b\b... done!"
# Report packages silently dropped by resume_depgraph().
11681 portage.writemsg("!!! One or more packages have been " + \
11682 "dropped due to\n" + \
11683 "!!! masking or unsatisfied dependencies:\n\n",
11685 for task in dropped_tasks:
11686 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
11687 portage.writemsg("\n", noiselevel=-1)
11690 if mydepgraph is not None:
11691 mydepgraph.display_problems()
11692 if not (ask or pretend):
11693 # delete the current list and also the backup
11694 # since it's probably stale too.
11695 for k in ("resume", "resume_backup"):
11696 mtimedb.pop(k, None)
11701 if ("--resume" in myopts):
11702 print darkgreen("emerge: It seems we have nothing to resume...")
# --- non-resume branch: build a fresh depgraph from the file arguments ---
11705 myparams = create_depgraph_params(myopts, myaction)
11706 if "--quiet" not in myopts and "--nodeps" not in myopts:
11707 print "Calculating dependencies ",
11709 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
11711 retval, favorites = mydepgraph.select_files(myfiles)
11712 except portage.exception.PackageNotFound, e:
11713 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
11715 except portage.exception.PackageSetNotFound, e:
11716 root_config = trees[settings["ROOT"]]["root_config"]
11717 display_missing_pkg_set(root_config, e.value)
11720 print "\b\b... done!"
11722 mydepgraph.display_problems()
# --- interactive display / --ask confirmation path ---
11725 if "--pretend" not in myopts and \
11726 ("--ask" in myopts or "--tree" in myopts or \
11727 "--verbose" in myopts) and \
11728 not ("--quiet" in myopts and "--ask" not in myopts):
11729 if "--resume" in myopts:
11730 mymergelist = mydepgraph.altlist()
11731 if len(mymergelist) == 0:
11732 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
11734 favorites = mtimedb["resume"]["favorites"]
11735 retval = mydepgraph.display(
11736 mydepgraph.altlist(reversed=tree),
11737 favorites=favorites)
11738 mydepgraph.display_problems()
11739 if retval != os.EX_OK:
11741 prompt="Would you like to resume merging these packages?"
11743 retval = mydepgraph.display(
11744 mydepgraph.altlist(reversed=("--tree" in myopts)),
11745 favorites=favorites)
11746 mydepgraph.display_problems()
11747 if retval != os.EX_OK:
# Determine whether there is at least one real merge in the list.
11750 for x in mydepgraph.altlist():
11751 if isinstance(x, Package) and x.operation == "merge":
11755 sets = trees[settings["ROOT"]]["root_config"].sets
11756 world_candidates = None
11757 if "--noreplace" in myopts and \
11758 not oneshot and favorites:
11759 # Sets that are not world candidates are filtered
11760 # out here since the favorites list needs to be
11761 # complete for depgraph.loadResumeCommand() to
11762 # operate correctly.
11763 world_candidates = [x for x in favorites \
11764 if not (x.startswith(SETPREFIX) and \
11765 not sets[x[1:]].world_candidate)]
11766 if "--noreplace" in myopts and \
11767 not oneshot and world_candidates:
11769 for x in world_candidates:
11770 print " %s %s" % (good("*"), x)
11771 prompt="Would you like to add these packages to your world favorites?"
11772 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
11773 prompt="Nothing to merge; would you like to auto-clean packages?"
11776 print "Nothing to merge; quitting."
11779 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11780 prompt="Would you like to fetch the source files for these packages?"
11782 prompt="Would you like to merge these packages?"
11784 if "--ask" in myopts and userquery(prompt) == "No":
11789 # Don't ask again (e.g. when auto-cleaning packages after merge)
11790 myopts.pop("--ask", None)
# --- pure --pretend display path (no confirmation) ---
11792 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
11793 if ("--resume" in myopts):
11794 mymergelist = mydepgraph.altlist()
11795 if len(mymergelist) == 0:
11796 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
11798 favorites = mtimedb["resume"]["favorites"]
11799 retval = mydepgraph.display(
11800 mydepgraph.altlist(reversed=tree),
11801 favorites=favorites)
11802 mydepgraph.display_problems()
11803 if retval != os.EX_OK:
11806 retval = mydepgraph.display(
11807 mydepgraph.altlist(reversed=("--tree" in myopts)),
11808 favorites=favorites)
11809 mydepgraph.display_problems()
11810 if retval != os.EX_OK:
# --buildpkgonly pre-check: all non-nomerge deps must be satisfiable.
11812 if "--buildpkgonly" in myopts:
11813 graph_copy = mydepgraph.digraph.clone()
11814 removed_nodes = set()
11815 for node in graph_copy:
11816 if not isinstance(node, Package) or \
11817 node.operation == "nomerge":
11818 removed_nodes.add(node)
11819 graph_copy.difference_update(removed_nodes)
11820 if not graph_copy.hasallzeros(ignore_priority = \
11821 DepPrioritySatisfiedRange.ignore_medium):
11822 print "\n!!! --buildpkgonly requires all dependencies to be merged."
11823 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same check repeated on the execution path (the surrounding else branch
# separating the two copies is elided from this listing).
11826 if "--buildpkgonly" in myopts:
11827 graph_copy = mydepgraph.digraph.clone()
11828 removed_nodes = set()
11829 for node in graph_copy:
11830 if not isinstance(node, Package) or \
11831 node.operation == "nomerge":
11832 removed_nodes.add(node)
11833 graph_copy.difference_update(removed_nodes)
11834 if not graph_copy.hasallzeros(ignore_priority = \
11835 DepPrioritySatisfiedRange.ignore_medium):
11836 print "\n!!! --buildpkgonly requires all dependencies to be merged."
11837 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# --- actually perform the merge via the Scheduler ---
11840 if ("--resume" in myopts):
11841 favorites=mtimedb["resume"]["favorites"]
11842 mymergelist = mydepgraph.altlist()
11843 mydepgraph.break_refs(mymergelist)
11844 mergetask = Scheduler(settings, trees, mtimedb, myopts,
11845 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
# Free the depgraph before merging to reduce memory pressure.
11846 del mydepgraph, mymergelist
11847 clear_caches(trees)
11849 retval = mergetask.merge()
11850 merge_count = mergetask.curval
# Keep a backup of a multi-entry resume list for later recovery.
11852 if "resume" in mtimedb and \
11853 "mergelist" in mtimedb["resume"] and \
11854 len(mtimedb["resume"]["mergelist"]) > 1:
11855 mtimedb["resume_backup"] = mtimedb["resume"]
11856 del mtimedb["resume"]
11858 mtimedb["resume"]={}
11859 # Stored as a dict starting with portage-2.1.6_rc1, and supported
11860 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
11861 # a list type for options.
11862 mtimedb["resume"]["myopts"] = myopts.copy()
11864 # Convert Atom instances to plain str.
11865 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
11867 pkglist = mydepgraph.altlist()
11868 mydepgraph.saveNomergeFavorites()
11869 mydepgraph.break_refs(pkglist)
11870 mergetask = Scheduler(settings, trees, mtimedb, myopts,
11871 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
11872 del mydepgraph, pkglist
11873 clear_caches(trees)
11875 retval = mergetask.merge()
11876 merge_count = mergetask.curval
# --- post-merge auto-clean / preserved-libs maintenance ---
11878 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
11879 if "yes" == settings.get("AUTOCLEAN"):
11880 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
11881 unmerge(trees[settings["ROOT"]]["root_config"],
11882 myopts, "clean", [],
11883 ldpath_mtimes, autoclean=1)
11885 portage.writemsg_stdout(colorize("WARN", "WARNING:")
11886 + " AUTOCLEAN is disabled. This can cause serious"
11887 + " problems due to overlapping packages.\n")
11888 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    """Complain on stderr that two mutually exclusive actions were given.

    Both offending action names are echoed back to the user.
    """
    # NOTE(review): the original listing elides the line(s) following this
    # function body; upstream terminates here with sys.exit(1) -- confirm.
    sys.stderr.write(
        "\n!!! Multiple actions requested... Please choose one only.\n"
        "!!! '%s' or '%s'\n\n" % (action1, action2))
# Pre-process argv so options with optional values (--deselect, --root-deps,
# -j/--jobs) always carry an explicit value before optparse sees them.
# Several lines (docstring quotes, `new_args = []`, the `while arg_stack:`
# header, continue statements and the final return) are elided.
11897 def insert_optional_args(args):
11899 Parse optional arguments and insert a value if one has
11900 not been provided. This is done before feeding the args
11901 to the optparse parser since that parser does not support
11902 this feature natively.
11906 jobs_opts = ("-j", "--jobs")
# Maps each optional-value option to the explicit values it accepts.
11907 default_arg_opts = {
11908 '--deselect' : ('n',),
11909 '--root-deps' : ('rdeps',),
# Process args left-to-right by popping from the end of a reversed copy.
11911 arg_stack = args[:]
11912 arg_stack.reverse()
11914 arg = arg_stack.pop()
11916 default_arg_choices = default_arg_opts.get(arg)
11917 if default_arg_choices is not None:
11918 new_args.append(arg)
# Consume an explicit value if the next token is one; otherwise insert the
# implicit 'True'.
11919 if arg_stack and arg_stack[-1] in default_arg_choices:
11920 new_args.append(arg_stack.pop())
11922 # insert default argument
11923 new_args.append('True')
# Short option clusters like -aj or -j4 also count as a jobs option.
11926 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
11927 if not (short_job_opt or arg in jobs_opts):
11928 new_args.append(arg)
11931 # Insert an empty placeholder in order to
11932 # satisfy the requirements of optparse.
11934 new_args.append("--jobs")
# Split a fused short option: -j4 carries the count, -aj defers it.
11937 if short_job_opt and len(arg) > 2:
11938 if arg[:2] == "-j":
11940 job_count = int(arg[2:])
11942 saved_opts = arg[2:]
11945 saved_opts = arg[1:].replace("j", "")
# A separate numeric token after -j/--jobs is the count; elided lines pop it.
11947 if job_count is None and arg_stack:
11949 job_count = int(arg_stack[-1])
11953 # Discard the job count from the stack
11954 # since we're consuming it here.
11957 if job_count is None:
11958 # unlimited number of jobs
11959 new_args.append("True")
11961 new_args.append(str(job_count))
# Re-emit any other short options that were fused with -j.
11963 if saved_opts is not None:
11964 new_args.append("-" + saved_opts)
# Build an optparse parser from the module-level action/option tables, parse
# the (pre-processed) command line and return (myaction, myopts, myfiles).
# Many lines (most option-table keys, try/except framing around int()/float()
# conversions, sys.exit calls) are elided from this listing.
11968 def parse_opts(tmpcmdline, silent=False):
11973 global actions, options, shortmapping
11975 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument; only the help/choices values survive in
# this listing -- the option-name keys are mostly elided.
11976 argument_options = {
11978 "help":"specify the location for portage configuration files",
11982 "help":"enable or disable color output",
11984 "choices":("y", "n")
11988 "help" : "remove atoms from the world file",
11990 "choices" : ("True", "n")
11995 "help" : "Specifies the number of packages to build " + \
12001 "--load-average": {
12003 "help" :"Specifies that no new builds should be started " + \
12004 "if there are other builds running and the load average " + \
12005 "is at least LOAD (a floating-point number).",
12011 "help":"include unnecessary build time dependencies",
12013 "choices":("y", "n")
12016 "help":"specify conditions to trigger package reinstallation",
12018 "choices":["changed-use"]
12021 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" below is a typo ("dependencies") in
# user-visible help text; left untouched here since it is a runtime string.
12026 "help" : "modify interpretation of depedencies",
12028 "choices" :("True", "rdeps")
12032 from optparse import OptionParser
12033 parser = OptionParser()
12034 if parser.has_option("--help"):
12035 parser.remove_option("--help")
12037 for action_opt in actions:
12038 parser.add_option("--" + action_opt, action="store_true",
12039 dest=action_opt.replace("-", "_"), default=False)
# NOTE(review): lstrip("--") strips *characters*, not a prefix; it is
# correct here only because no option name starts with '-' after the dashes.
12040 for myopt in options:
12041 parser.add_option(myopt, action="store_true",
12042 dest=myopt.lstrip("--").replace("-", "_"), default=False)
12043 for shortopt, longopt in shortmapping.iteritems():
12044 parser.add_option("-" + shortopt, action="store_true",
12045 dest=longopt.lstrip("--").replace("-", "_"), default=False)
12046 for myalias, myopt in longopt_aliases.iteritems():
12047 parser.add_option(myalias, action="store_true",
12048 dest=myopt.lstrip("--").replace("-", "_"), default=False)
12050 for myopt, kwargs in argument_options.iteritems():
12051 parser.add_option(myopt,
12052 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-value options before optparse sees them.
12054 tmpcmdline = insert_optional_args(tmpcmdline)
12056 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Convert the sentinel string "True" (inserted by insert_optional_args)
# into real booleans.
12058 if myoptions.deselect == "True":
12059 myoptions.deselect = True
12061 if myoptions.root_deps == "True":
12062 myoptions.root_deps = True
# --jobs: "True" means unlimited; otherwise validate the integer (the
# try/except and error-exit lines are elided).
12066 if myoptions.jobs == "True":
12070 jobs = int(myoptions.jobs)
12074 if jobs is not True and \
12078 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
12079 (myoptions.jobs,), noiselevel=-1)
12081 myoptions.jobs = jobs
12083 if myoptions.load_average:
12085 load_average = float(myoptions.load_average)
12089 if load_average <= 0.0:
12090 load_average = None
12092 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
12093 (myoptions.load_average,), noiselevel=-1)
12095 myoptions.load_average = load_average
# Transfer parsed values into the myopts dict keyed by option string.
12097 for myopt in options:
12098 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
12100 myopts[myopt] = True
12102 for myopt in argument_options:
12103 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
12107 if myoptions.searchdesc:
12108 myoptions.search = True
# Exactly one action may be selected; conflicts abort via multiple_actions.
12110 for action_opt in actions:
12111 v = getattr(myoptions, action_opt.replace("-", "_"))
12114 multiple_actions(myaction, action_opt)
12116 myaction = action_opt
12118 if myaction is None and myoptions.deselect is True:
12119 myaction = 'deselect'
12123 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Validate the configuration of every configured root.

	Walks each root in *trees* and asks that root's vartree settings
	object to validate itself, so profile/config problems surface
	before any action runs.
	"""
	for root in trees:
		trees[root]["vartree"].settings.validate()
# Flush memoized package-database state so subsequent queries reread
# from disk.  Called after operations that may have changed on-disk data.
12130 def clear_caches(trees):
# Reset the porttree/bintree/vartree caches of every configured root.
12131 for d in trees.itervalues():
# melt() presumably undoes a prior freeze() of the portdbapi
# (un-memoizes xmatch results) -- confirm against portdbapi.
12132 d["porttree"].dbapi.melt()
12133 d["porttree"].dbapi._aux_cache.clear()
12134 d["bintree"].dbapi._aux_cache.clear()
12135 d["bintree"].dbapi._clear_cache()
12136 d["vartree"].dbapi.linkmap._clear_cache()
# Global stat/listdir cache kept by the portage module itself.
# NOTE(review): indentation was lost in this listing; this line most
# likely sits OUTSIDE the loop (cleared once) -- confirm in the full file.
12137 portage.dircache.clear()
# Build and return the (settings, trees, mtimedb) triple used by an emerge
# run.  NOTE(review): several lines are elided in this listing (e.g. the
# kwargs dict for create_trees, around original lines 12141/12145) --
# consult the full file before modifying.
12140 def load_emerge_config(trees=None):
# Map PORTAGE_CONFIGROOT / ROOT environment overrides into create_trees
# keyword arguments (only when set to a non-blank value).
12142 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
12143 v = os.environ.get(envvar, None)
12144 if v and v.strip():
# (elided: kwargs[k] = v, per the surrounding logic)
12146 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + set configuration) to every root.
12148 for root, root_trees in trees.iteritems():
12149 settings = root_trees["vartree"].settings
12150 setconfig = load_default_config(settings, root_trees)
12151 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Start from the "/" root's settings; the loop below presumably switches
# to the target ROOT's settings when one exists (condition elided).
12153 settings = trees["/"]["vartree"].settings
12155 for myroot in trees:
12157 settings = trees[myroot]["vartree"].settings
# The mtime database always lives under the host's CACHE_PATH,
# independent of the target ROOT.
12160 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
12161 mtimedb = portage.MtimeDB(mtimedbfile)
12163 return settings, trees, mtimedb
12165 def adjust_config(myopts, settings):
12166 """Make emerge specific adjustments to the config."""
# NOTE(review): this listing elides several lines, including the "try:"
# openers that pair with the "except ValueError" clauses below and the
# initial CLEAN_DELAY default -- consult the full file before editing.
12168 # To enhance usability, make some vars case insensitive by forcing them to
12170 for myvar in ("AUTOCLEAN", "NOCOLOR"):
12171 if myvar in settings:
12172 settings[myvar] = settings[myvar].lower()
12173 settings.backup_changes(myvar)
12176 # Kill noauto as it will break merges otherwise.
12177 if "noauto" in settings.features:
12178 settings.features.remove('noauto')
12179 settings['FEATURES'] = ' '.join(sorted(settings.features))
12180 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY, falling back (and resetting the config value) on a
# non-integer setting.  (elided: default assignment and "try:")
12184 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
12185 except ValueError, e:
12186 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12187 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
12188 settings["CLEAN_DELAY"], noiselevel=-1)
12189 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
12190 settings.backup_changes("CLEAN_DELAY")
# Same integer-parse-with-fallback pattern for EMERGE_WARNING_DELAY.
12192 EMERGE_WARNING_DELAY = 10
12194 EMERGE_WARNING_DELAY = int(settings.get(
12195 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
12196 except ValueError, e:
12197 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12198 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
12199 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
12200 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
12201 settings.backup_changes("EMERGE_WARNING_DELAY")
# Mirror relevant command-line flags into the backed-up config so that
# ebuild subprocesses see them.
12203 if "--quiet" in myopts:
12204 settings["PORTAGE_QUIET"]="1"
12205 settings.backup_changes("PORTAGE_QUIET")
12207 if "--verbose" in myopts:
12208 settings["PORTAGE_VERBOSE"] = "1"
12209 settings.backup_changes("PORTAGE_VERBOSE")
12211 # Set so that configs will be merged regardless of remembered status
12212 if ("--noconfmem" in myopts):
12213 settings["NOCONFMEM"]="1"
12214 settings.backup_changes("NOCONFMEM")
12216 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as 0 or 1; anything else is reported and
# (per the elided lines) reset to the default.
12219 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
12220 if PORTAGE_DEBUG not in (0, 1):
12221 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
12222 PORTAGE_DEBUG, noiselevel=-1)
12223 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
12226 except ValueError, e:
12227 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12228 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
12229 settings["PORTAGE_DEBUG"], noiselevel=-1)
# --debug on the command line forces debug mode on (elided: the
# PORTAGE_DEBUG = 1 assignment at line ~12232).
12231 if "--debug" in myopts:
12233 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
12234 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR config, explicit --color, then tty detection.
12236 if settings.get("NOCOLOR") not in ("yes","true"):
12237 portage.output.havecolor = 1
12239 """The explicit --color < y | n > option overrides the NOCOLOR environment
12240 variable and stdout auto-detection."""
12241 if "--color" in myopts:
12242 if "y" == myopts["--color"]:
12243 portage.output.havecolor = 1
12244 settings["NOCOLOR"] = "false"
# (elided: the "else:" branch opener for --color=n)
12246 portage.output.havecolor = 0
12247 settings["NOCOLOR"] = "true"
12248 settings.backup_changes("NOCOLOR")
# No explicit option: disable color when stdout is not a terminal,
# unless NOCOLOR="no" explicitly forces it on.
12249 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
12250 portage.output.havecolor = 0
12251 settings["NOCOLOR"] = "true"
12252 settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities for this emerge run.  Body elided
# in this listing (original lines 12255-12257); presumably calls the
# nice() and ionice() helpers defined below -- confirm in the full file.
12254 def apply_priorities(settings):
# Renice the current process according to PORTAGE_NICENESS (default "0").
12258 def nice(settings):
# (elided line 12259: the opening "try:" for the except clause below)
12260 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# A bad value or an OS refusal is reported via eerror, not fatal.
12261 except (OSError, ValueError), e:
12262 out = portage.output.EOutput()
12263 out.eerror("Failed to change nice value to '%s'" % \
12264 settings["PORTAGE_NICENESS"])
12265 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND against this process.
12267 def ionice(settings):
12269 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
# (elided: early return when no command is configured)
12271 ionice_cmd = shlex.split(ionice_cmd)
12275 from portage.util import varexpand
# Substitute ${PID} in the configured command with our own pid.
12276 variables = {"PID" : str(os.getpid())}
12277 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
# (elided: the opening "try:" for the except clause below)
12280 rval = portage.process.spawn(cmd, env=os.environ)
12281 except portage.exception.CommandNotFound:
12282 # The OS kernel probably doesn't support ionice,
12283 # so return silently.
# (elided: the "return" for the silent exit, lines ~12284-12285)
# A non-zero exit status is reported but not fatal.
12286 if rval != os.EX_OK:
12287 out = portage.output.EOutput()
12288 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
12289 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Report (at ERROR level) that *set_name* matched no package set, and
# list the sets that do exist.  (elided: the msg = [] initialization,
# around original line 12292.)
12291 def display_missing_pkg_set(root_config, set_name):
12294 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
12295 "The following sets exist:") % \
12296 colorize("INFORM", set_name))
12299 for s in sorted(root_config.sets):
12300 msg.append(" %s" % s)
12303 writemsg_level("".join("%s\n" % l for l in msg),
12304 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments in myfiles into concrete atoms (or prefixed set
# names), handling set options and simple set-algebra expressions.
# Returns (newargs, retval).  NOTE(review): this listing elides many
# lines (newargs/newset/options initializers, ARG_START/ARG_END and
# IS_OPERATOR constants, error "return" statements) -- consult the full
# file before modifying.
12306 def expand_set_arguments(myfiles, myaction, root_config):
12308 setconfig = root_config.setconfig
12310 sets = setconfig.getSets()
12312 # In order to know exactly which atoms/sets should be added to the
12313 # world file, the depgraph performs set expansion later. It will get
12314 # confused about where the atoms came from if it's not allowed to
12315 # expand them itself.
12316 do_not_expand = (None, )
# Bare "system"/"world" arguments are treated as their @-prefixed sets.
12319 if a in ("system", "world"):
12320 newargs.append(SETPREFIX+a)
12327 # separators for set arguments
12331 # WARNING: all operators must be of equal length
12333 DIFF_OPERATOR = "-@"
12334 UNION_OPERATOR = "+@"
# First pass: extract per-set options of the form @set[opt,...] and push
# them into the SetConfig before expansion.
12336 for i in range(0, len(myfiles)):
12337 if myfiles[i].startswith(SETPREFIX):
12340 x = myfiles[i][len(SETPREFIX):]
12343 start = x.find(ARG_START)
12344 end = x.find(ARG_END)
12345 if start > 0 and start < end:
12346 namepart = x[:start]
12347 argpart = x[start+1:end]
12349 # TODO: implement proper quoting
12350 args = argpart.split(",")
# Options are either key=value pairs or bare flags stored as "True".
12354 k, v = a.split("=", 1)
12357 options[a] = "True"
12358 setconfig.update(namepart, options)
12359 newset += (x[:start-len(namepart)]+namepart)
12360 x = x[end+len(ARG_END):]
12364 myfiles[i] = SETPREFIX+newset
# Re-read the sets now that options may have changed their config.
12366 sets = setconfig.getSets()
12368 # display errors that occured while loading the SetConfig instance
12369 for e in setconfig.errors:
12370 print colorize("BAD", "Error during set creation: %s" % e)
12372 # emerge relies on the existance of sets with names "world" and "system"
12373 required_sets = ("world", "system")
12376 for s in required_sets:
12378 missing_sets.append(s)
# Build an English list ("a", "a and b", "a, b, and c") of missing sets.
12380 if len(missing_sets) > 2:
12381 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
12382 missing_sets_str += ', and "%s"' % missing_sets[-1]
12383 elif len(missing_sets) == 2:
12384 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
12386 missing_sets_str = '"%s"' % missing_sets[-1]
12387 msg = ["emerge: incomplete set configuration, " + \
12388 "missing set(s): %s" % missing_sets_str]
12390 msg.append(" sets defined: %s" % ", ".join(sets))
12391 msg.append(" This usually means that '%s'" % \
12392 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
12393 msg.append(" is missing or corrupt.")
12395 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
# Actions for which a set must support the "unmerge" operation.
12397 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: expand each @set argument.
12400 if a.startswith(SETPREFIX):
12401 # support simple set operations (intersection, difference and union)
12402 # on the commandline. Expressions are evaluated strictly left-to-right
12403 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
12404 expression = a[len(SETPREFIX):]
# Peel operators off the right end, collecting operand set names and
# operators in left-to-right order.
12407 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
12408 is_pos = expression.rfind(IS_OPERATOR)
12409 diff_pos = expression.rfind(DIFF_OPERATOR)
12410 union_pos = expression.rfind(UNION_OPERATOR)
12411 op_pos = max(is_pos, diff_pos, union_pos)
12412 s1 = expression[:op_pos]
12413 s2 = expression[op_pos+len(IS_OPERATOR):]
12414 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
12416 display_missing_pkg_set(root_config, s2)
12418 expr_sets.insert(0, s2)
12419 expr_ops.insert(0, op)
12421 if not expression in sets:
12422 display_missing_pkg_set(root_config, expression)
12424 expr_sets.insert(0, expression)
# Evaluate the expression left-to-right starting from the leftmost set.
12425 result = set(setconfig.getSetAtoms(expression))
12426 for i in range(0, len(expr_ops)):
12427 s2 = setconfig.getSetAtoms(expr_sets[i+1])
12428 if expr_ops[i] == IS_OPERATOR:
12429 result.intersection_update(s2)
12430 elif expr_ops[i] == DIFF_OPERATOR:
12431 result.difference_update(s2)
12432 elif expr_ops[i] == UNION_OPERATOR:
# (elided: result.update(s2) for the union branch)
12435 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
12436 newargs.extend(result)
# Plain @set argument (no operators).
12438 s = a[len(SETPREFIX):]
12440 display_missing_pkg_set(root_config, s)
12442 setconfig.active.append(s)
# (elided: the opening "try:" for the except clause below)
12444 set_atoms = setconfig.getSetAtoms(s)
12445 except portage.exception.PackageSetNotFound, e:
12446 writemsg_level(("emerge: the given set '%s' " + \
12447 "contains a non-existent set named '%s'.\n") % \
12448 (s, e), level=logging.ERROR, noiselevel=-1)
12450 if myaction in unmerge_actions and \
12451 not sets[s].supportsOperation("unmerge"):
12452 sys.stderr.write("emerge: the given set '%s' does " % s + \
12453 "not support unmerge operations\n")
12455 elif not set_atoms:
12456 print "emerge: '%s' is an empty set" % s
# Only expand to atoms for actions that take package arguments; for
# do_not_expand actions the depgraph expands sets itself (see above).
12457 elif myaction not in do_not_expand:
12458 newargs.extend(set_atoms)
12460 newargs.append(SETPREFIX+s)
12461 for e in sets[s].errors:
12465 return (newargs, retval)
# Warn about overlays that lack a profiles/repo_name entry.  Returns
# True when at least one repository is missing a name.
12467 def repo_name_check(trees):
12468 missing_repo_names = set()
12469 for root, root_trees in trees.iteritems():
12470 if "porttree" in root_trees:
12471 portdb = root_trees["porttree"].dbapi
# Start from all trees, then discard those with a registered name.
12472 missing_repo_names.update(portdb.porttrees)
12473 repos = portdb.getRepositories()
# (elided: the "for r in repos:" loop opener at line ~12474)
12475 missing_repo_names.discard(portdb.getRepositoryPath(r))
12476 if portdb.porttree_root in missing_repo_names and \
12477 not os.path.exists(os.path.join(
12478 portdb.porttree_root, "profiles")):
12479 # This is normal if $PORTDIR happens to be empty,
12480 # so don't warn about it.
12481 missing_repo_names.remove(portdb.porttree_root)
12483 if missing_repo_names:
# (elided: msg = [] initialization at line ~12484)
12485 msg.append("WARNING: One or more repositories " + \
12486 "have missing repo_name entries:")
12488 for p in missing_repo_names:
12489 msg.append("\t%s/profiles/repo_name" % (p,))
12491 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
12492 "should be a plain text file containing a unique " + \
12493 "name for the repository on the first line.", 70))
12494 writemsg_level("".join("%s\n" % l for l in msg),
12495 level=logging.WARNING, noiselevel=-1)
12497 return bool(missing_repo_names)
# Warn about overlays ignored because their profiles/repo_name duplicates
# another repository's.  Returns True when any were ignored.  (elided:
# the ignored_repos = {} initialization, around line 12500.)
12499 def repo_name_duplicate_check(trees):
12501 for root, root_trees in trees.iteritems():
12502 if 'porttree' in root_trees:
12503 portdb = root_trees['porttree'].dbapi
# The warning can be silenced with PORTAGE_REPO_DUPLICATE_WARN=0.
12504 if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
12505 for repo_name, paths in portdb._ignored_repos:
12506 k = (root, repo_name, portdb.getRepositoryPath(repo_name))
12507 ignored_repos.setdefault(k, []).extend(paths)
# (elided: "if ignored_repos:" guard and msg = [] initialization)
12511 msg.append('WARNING: One or more repositories ' + \
12512 'have been ignored due to duplicate')
12513 msg.append(' profiles/repo_name entries:')
12515 for k in sorted(ignored_repos):
12516 msg.append(' %s overrides' % (k,))
12517 for path in ignored_repos[k]:
12518 msg.append(' %s' % (path,))
12520 msg.extend(' ' + x for x in textwrap.wrap(
12521 "All profiles/repo_name entries must be unique in order " + \
12522 "to avoid having duplicates ignored. " + \
12523 "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
12524 "/etc/make.conf if you would like to disable this warning."))
12525 writemsg_level(''.join('%s\n' % l for l in msg),
12526 level=logging.WARNING, noiselevel=-1)
12528 return bool(ignored_repos)
# Warn (per root) when CONFIG_PROTECT is empty, since config files would
# then be overwritten without protection.
12530 def config_protect_check(trees):
12531 for root, root_trees in trees.iteritems():
12532 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
12533 msg = "!!! CONFIG_PROTECT is empty"
# (elided line ~12534: presumably a guard so the root suffix is only
# added for non-"/" roots -- confirm in the full file)
12535 msg += " for '%s'" % root
12536 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Tell the user that a short ebuild name matched several category/package
# pairs.  Quiet mode prints a plain list; otherwise a search-style
# description of each candidate is shown.
12538 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
12540 if "--quiet" in myopts:
12541 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12542 print "!!! one of the following fully-qualified ebuild names instead:\n"
12543 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12544 print " " + colorize("INFORM", cp)
# (elided: the "return" ending the quiet branch)
# Verbose branch: reuse the search machinery to describe each match.
12547 s = search(root_config, spinner, "--searchdesc" in myopts,
12548 "--quiet" not in myopts, "--usepkg" in myopts,
12549 "--usepkgonly" in myopts)
12550 null_cp = portage.dep_getkey(insert_category_into_atom(
# (elided: the closing arguments of insert_category_into_atom)
12552 cat, atom_pn = portage.catsplit(null_cp)
12553 s.searchkey = atom_pn
12554 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
# (elided: the per-candidate s.addCP/output call)
12557 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12558 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile before proceeding.  The
# info/sync actions and --version/--help are exempt so the user can still
# inspect or repair a broken system.  (elided: the early "return
# os.EX_OK" lines and the error return at the end -- confirm in the
# full file.)
12560 def profile_check(trees, myaction, myopts):
12561 if myaction in ("info", "sync"):
12563 elif "--version" in myopts or "--help" in myopts:
12565 for root, root_trees in trees.iteritems():
# A non-empty settings.profiles means this root's profile resolved.
12566 if root_trees["root_config"].settings.profiles:
12568 # generate some profile related warning messages
12569 validate_ebuild_environment(trees)
12570 msg = "If you have just changed your profile configuration, you " + \
12571 "should revert back to the previous configuration. Due to " + \
12572 "your current profile being invalid, allowed actions are " + \
12573 "limited to --help, --info, --sync, and --version."
12574 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
12575 level=logging.ERROR, noiselevel=-1)
12580 global portage # NFC why this is necessary now - genone
12581 portage._disable_legacy_globals()
12582 # Disable color until we're sure that it should be enabled (after
12583 # EMERGE_DEFAULT_OPTS has been parsed).
12584 portage.output.havecolor = 0
12585 # This first pass is just for options that need to be known as early as
12586 # possible, such as --config-root. They will be parsed again later,
12587 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
12588 # the value of --config-root).
12589 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
12590 if "--debug" in myopts:
12591 os.environ["PORTAGE_DEBUG"] = "1"
12592 if "--config-root" in myopts:
12593 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
12594 if "--root" in myopts:
12595 os.environ["ROOT"] = myopts["--root"]
12597 # Portage needs to ensure a sane umask for the files it creates.
12599 settings, trees, mtimedb = load_emerge_config()
12600 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12601 rval = profile_check(trees, myaction, myopts)
12602 if rval != os.EX_OK:
12605 if portage._global_updates(trees, mtimedb["updates"]):
12607 # Reload the whole config from scratch.
12608 settings, trees, mtimedb = load_emerge_config(trees=trees)
12609 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12611 xterm_titles = "notitles" not in settings.features
12614 if "--ignore-default-opts" not in myopts:
12615 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
12616 tmpcmdline.extend(sys.argv[1:])
12617 myaction, myopts, myfiles = parse_opts(tmpcmdline)
12619 if "--digest" in myopts:
12620 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
12621 # Reload the whole config from scratch so that the portdbapi internal
12622 # config is updated with new FEATURES.
12623 settings, trees, mtimedb = load_emerge_config(trees=trees)
12624 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12626 for myroot in trees:
12627 mysettings = trees[myroot]["vartree"].settings
12628 mysettings.unlock()
12629 adjust_config(myopts, mysettings)
12630 if '--pretend' not in myopts and myaction in \
12631 (None, 'clean', 'depclean', 'prune', 'unmerge'):
12632 mysettings["PORTAGE_COUNTER_HASH"] = \
12633 trees[myroot]["vartree"].dbapi._counter_hash()
12634 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
12636 del myroot, mysettings
12638 apply_priorities(settings)
12640 spinner = stdout_spinner()
12641 if "candy" in settings.features:
12642 spinner.update = spinner.update_scroll
12644 if "--quiet" not in myopts:
12645 portage.deprecated_profile_check(settings=settings)
12646 repo_name_check(trees)
12647 repo_name_duplicate_check(trees)
12648 config_protect_check(trees)
12650 for mytrees in trees.itervalues():
12651 mydb = mytrees["porttree"].dbapi
12652 # Freeze the portdbapi for performance (memoize all xmatch results).
12656 if "moo" in myfiles:
12659 Larry loves Gentoo (""" + platform.system() + """)
12661 _______________________
12662 < Have you mooed today? >
12663 -----------------------
12673 ext = os.path.splitext(x)[1]
12674 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
12675 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
12678 root_config = trees[settings["ROOT"]]["root_config"]
12679 if myaction == "list-sets":
12680 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
12684 # only expand sets for actions taking package arguments
12685 oldargs = myfiles[:]
12686 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
12687 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
12688 if retval != os.EX_OK:
12691 # Need to handle empty sets specially, otherwise emerge will react
12692 # with the help message for empty argument lists
12693 if oldargs and not myfiles:
12694 print "emerge: no targets left after set expansion"
12697 if ("--tree" in myopts) and ("--columns" in myopts):
12698 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
12701 if ("--quiet" in myopts):
12702 spinner.update = spinner.update_quiet
12703 portage.util.noiselimit = -1
12705 # Always create packages if FEATURES=buildpkg
12706 # Imply --buildpkg if --buildpkgonly
12707 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
12708 if "--buildpkg" not in myopts:
12709 myopts["--buildpkg"] = True
12711 # Always try and fetch binary packages if FEATURES=getbinpkg
12712 if ("getbinpkg" in settings.features):
12713 myopts["--getbinpkg"] = True
12715 if "--buildpkgonly" in myopts:
12716 # --buildpkgonly will not merge anything, so
12717 # it cancels all binary package options.
12718 for opt in ("--getbinpkg", "--getbinpkgonly",
12719 "--usepkg", "--usepkgonly"):
12720 myopts.pop(opt, None)
12722 if "--fetch-all-uri" in myopts:
12723 myopts["--fetchonly"] = True
12725 if "--skipfirst" in myopts and "--resume" not in myopts:
12726 myopts["--resume"] = True
12728 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
12729 myopts["--usepkgonly"] = True
12731 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
12732 myopts["--getbinpkg"] = True
12734 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
12735 myopts["--usepkg"] = True
12737 # Also allow -K to apply --usepkg/-k
12738 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
12739 myopts["--usepkg"] = True
12741 # Allow -p to remove --ask
12742 if "--pretend" in myopts:
12743 myopts.pop("--ask", None)
12745 # forbid --ask when not in a terminal
12746 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
12747 if ("--ask" in myopts) and (not sys.stdin.isatty()):
12748 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
12752 if settings.get("PORTAGE_DEBUG", "") == "1":
12753 spinner.update = spinner.update_quiet
12755 if "python-trace" in settings.features:
12756 import portage.debug
12757 portage.debug.set_trace(True)
12759 if not ("--quiet" in myopts):
12760 if not sys.stdout.isatty() or ("--nospinner" in myopts):
12761 spinner.update = spinner.update_basic
12763 if myaction == 'version':
12764 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12765 settings.profile_path, settings["CHOST"],
12766 trees[settings["ROOT"]]["vartree"].dbapi)
12768 elif "--help" in myopts:
12769 _emerge.help.help(myaction, myopts, portage.output.havecolor)
12772 if "--debug" in myopts:
12773 print "myaction", myaction
12774 print "myopts", myopts
12776 if not myaction and not myfiles and "--resume" not in myopts:
12777 _emerge.help.help(myaction, myopts, portage.output.havecolor)
12780 pretend = "--pretend" in myopts
12781 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
12782 buildpkgonly = "--buildpkgonly" in myopts
12784 # check if root user is the current user for the actions where emerge needs this
12785 if portage.secpass < 2:
12786 # We've already allowed "--version" and "--help" above.
12787 if "--pretend" not in myopts and myaction not in ("search","info"):
12788 need_superuser = myaction in ('clean', 'depclean', 'deselect',
12789 'prune', 'unmerge') or not \
12791 (buildpkgonly and secpass >= 1) or \
12792 myaction in ("metadata", "regen") or \
12793 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
12794 if portage.secpass < 1 or \
12797 access_desc = "superuser"
12799 access_desc = "portage group"
12800 # Always show portage_group_warning() when only portage group
12801 # access is required but the user is not in the portage group.
12802 from portage.data import portage_group_warning
12803 if "--ask" in myopts:
12804 myopts["--pretend"] = True
12805 del myopts["--ask"]
12806 print ("%s access is required... " + \
12807 "adding --pretend to options\n") % access_desc
12808 if portage.secpass < 1 and not need_superuser:
12809 portage_group_warning()
12811 sys.stderr.write(("emerge: %s access is required\n") \
12813 if portage.secpass < 1 and not need_superuser:
12814 portage_group_warning()
12817 disable_emergelog = False
12818 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
12820 disable_emergelog = True
12822 if myaction in ("search", "info"):
12823 disable_emergelog = True
12824 if disable_emergelog:
12825 """ Disable emergelog for everything except build or unmerge
12826 operations. This helps minimize parallel emerge.log entries that can
12827 confuse log parsers. We especially want it disabled during
12828 parallel-fetch, which uses --resume --fetchonly."""
12830 def emergelog(*pargs, **kargs):
12834 if 'EMERGE_LOG_DIR' in settings:
12836 # At least the parent needs to exist for the lock file.
12837 portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
12838 except portage.exception.PortageException, e:
12839 writemsg_level("!!! Error creating directory for " + \
12840 "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
12841 (settings['EMERGE_LOG_DIR'], e),
12842 noiselevel=-1, level=logging.ERROR)
12844 global _emerge_log_dir
12845 _emerge_log_dir = settings['EMERGE_LOG_DIR']
12847 if not "--pretend" in myopts:
12848 emergelog(xterm_titles, "Started emerge on: "+\
12849 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
12852 myelogstr=" ".join(myopts)
12854 myelogstr+=" "+myaction
12856 myelogstr += " " + " ".join(oldargs)
12857 emergelog(xterm_titles, " *** emerge " + myelogstr)
12860 def emergeexitsig(signum, frame):
12861 signal.signal(signal.SIGINT, signal.SIG_IGN)
12862 signal.signal(signal.SIGTERM, signal.SIG_IGN)
12863 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
12864 sys.exit(100+signum)
12865 signal.signal(signal.SIGINT, emergeexitsig)
12866 signal.signal(signal.SIGTERM, emergeexitsig)
12869 """This gets out final log message in before we quit."""
12870 if "--pretend" not in myopts:
12871 emergelog(xterm_titles, " *** terminating.")
12872 if "notitles" not in settings.features:
12874 portage.atexit_register(emergeexit)
12876 if myaction in ("config", "metadata", "regen", "sync"):
12877 if "--pretend" in myopts:
12878 sys.stderr.write(("emerge: The '%s' action does " + \
12879 "not support '--pretend'.\n") % myaction)
12882 if "sync" == myaction:
12883 return action_sync(settings, trees, mtimedb, myopts, myaction)
12884 elif "metadata" == myaction:
12885 action_metadata(settings, portdb, myopts)
12886 elif myaction=="regen":
12887 validate_ebuild_environment(trees)
12888 return action_regen(settings, portdb, myopts.get("--jobs"),
12889 myopts.get("--load-average"))
12891 elif "config"==myaction:
12892 validate_ebuild_environment(trees)
12893 action_config(settings, trees, myopts, myfiles)
12896 elif "search"==myaction:
12897 validate_ebuild_environment(trees)
12898 action_search(trees[settings["ROOT"]]["root_config"],
12899 myopts, myfiles, spinner)
12901 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
12902 validate_ebuild_environment(trees)
12903 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
12904 myopts, myaction, myfiles, spinner)
12905 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
12906 post_emerge(root_config, myopts, mtimedb, rval)
12909 elif myaction == 'info':
12911 # Ensure atoms are valid before calling unmerge().
12912 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12915 if is_valid_package_atom(x):
12917 valid_atoms.append(
12918 portage.dep_expand(x, mydb=vardb, settings=settings))
12919 except portage.exception.AmbiguousPackageName, e:
12920 msg = "The short ebuild name \"" + x + \
12921 "\" is ambiguous. Please specify " + \
12922 "one of the following " + \
12923 "fully-qualified ebuild names instead:"
12924 for line in textwrap.wrap(msg, 70):
12925 writemsg_level("!!! %s\n" % (line,),
12926 level=logging.ERROR, noiselevel=-1)
12928 writemsg_level(" %s\n" % colorize("INFORM", i),
12929 level=logging.ERROR, noiselevel=-1)
12930 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12934 msg.append("'%s' is not a valid package atom." % (x,))
12935 msg.append("Please check ebuild(5) for full details.")
12936 writemsg_level("".join("!!! %s\n" % line for line in msg),
12937 level=logging.ERROR, noiselevel=-1)
12940 return action_info(settings, trees, myopts, valid_atoms)
12942 # "update", "system", or just process files:
12944 validate_ebuild_environment(trees)
12947 if x.startswith(SETPREFIX) or \
12948 is_valid_package_atom(x):
12950 if x[:1] == os.sep:
12958 msg.append("'%s' is not a valid package atom." % (x,))
12959 msg.append("Please check ebuild(5) for full details.")
12960 writemsg_level("".join("!!! %s\n" % line for line in msg),
12961 level=logging.ERROR, noiselevel=-1)
12964 if "--pretend" not in myopts:
12965 display_news_notification(root_config, myopts)
12966 retval = action_build(settings, trees, mtimedb,
12967 myopts, myaction, myfiles, spinner)
12968 root_config = trees[settings["ROOT"]]["root_config"]
12969 post_emerge(root_config, myopts, mtimedb, retval)